Dataset schema (per-column type and value ranges as reported by the dataset viewer):

| column | dtype | min | max |
|---|---|---|---|
| repo_name | string | 7 chars | 71 chars |
| file_path | string | 5 chars | 118 chars |
| context | list | — | — |
| import_statement | string | 45 chars | 12.5k chars |
| token_num | int64 | 641 | 99.4k |
| cropped_code | string | 44 chars | 17k chars |
| all_code | string | 43 chars | 754k chars |
| next_line | string | 2 chars | 330 chars |
| gold_snippet_index | int64 | 0 | 68 |
| created_at | string | 25 chars | 25 chars |
| level | string (9 classes) | — | — |
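The dump does not name its source file, so the following is a minimal, hedged sketch of how one might iterate records with this schema for next-line prediction, assuming they are stored as local JSON lines (the path `train.jsonl` and the loader itself are assumptions; the field names come from the schema above):

```python
import json

def iter_records(path="train.jsonl"):
    # Hypothetical loader: one JSON object per line.
    with open(path) as f:
        for line in f:
            yield json.loads(line)

for rec in iter_records():
    # `cropped_code` is the in-file context before the completion point,
    # `next_line` is the ground-truth continuation, and
    # `gold_snippet_index` selects the relevant snippet in `context`.
    prompt = rec["import_statement"] + "\n" + rec["cropped_code"]
    target = rec["next_line"]
    gold_ctx = rec["context"][rec["gold_snippet_index"]]
    print(rec["repo_name"], rec["file_path"], repr(target[:40]))
    break
```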
alexzhou907/DreamPropeller
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Flo...
import os import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import threestudio import trimesh from dataclasses import dataclass, field from threestudio.models.geometry.base import ( BaseExplicitGeometry, BaseGeometry, contract_to_unisphere, ) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.misc import broadcast from threestudio.utils.ops import scale_tensor from threestudio.utils.typing import * from pysdf import SDF
14,740
self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: if self.cfg.shape_init is None and not self.cfg.force_shape_init: return # do not initialize shape if weights are provided if self.cfg.weights is not None and not self.cfg.force_shape_init: return get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]] assert isinstance(self.cfg.shape_init, str) if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters():
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None shape_init_mesh_up: str = "+z" shape_init_mesh_front: str = "+x" force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self,*args, **kwargs) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: if self.cfg.shape_init is None and not self.cfg.force_shape_init: return # do not initialize shape if weights are provided if self.cfg.weights is not None and not self.cfg.force_shape_init: return get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]] assert isinstance(self.cfg.shape_init, str) if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): 
raise ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters():
broadcast(param, src=0)
9
2023-11-27 23:39:49+00:00
24k
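The record above initializes a tetrahedral SDF grid from analytic primitives: a sphere SDF and an ellipsoid "pseudo" SDF whose zero level set is correct even though it is not a true distance field. A self-contained sketch of those two formulas, with illustrative names not taken from the repo:

```python
import torch

def sphere_sdf(p: torch.Tensor, radius: float = 0.5) -> torch.Tensor:
    # Negative inside, zero on the surface, positive outside.
    return p.norm(dim=-1, keepdim=True) - radius

def ellipsoid_pseudo_sdf(p: torch.Tensor, size: torch.Tensor) -> torch.Tensor:
    # Not a true distance (its gradient norm is not 1), but the zero
    # level set is the ellipsoid surface, which is all the init needs.
    return ((p / size) ** 2).sum(dim=-1, keepdim=True).sqrt() - 1.0

pts = torch.tensor([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
print(sphere_sdf(pts))                                            # [[-0.5], [0.5]]
print(ellipsoid_pseudo_sdf(pts, torch.tensor([1.0, 0.5, 0.5])))   # [[-1.0], [0.0]]
```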
abdulhaim/LMRL-Gym
llm_rl_scripts/wordle/ppo/train_ppo.py
[ { "identifier": "train_loop", "path": "LLM_RL/algorithms/ppo/train.py", "snippet": "def train_loop(\n trainer: PPOTrain, \n inference: PPOInference, \n policy: PPOPolicy, \n load_dataset: Callable[[PPOInference, PPOPolicy], Union[PPODataset, PPOIterableDataset]], \n evaluator: Optional[Ca...
from typing import Optional from JaxSeq.bucket_manager import open_with_bucket as open from transformers import AutoTokenizer from JaxSeq.utils import jsonl_stream, convert_path, load_mesh, setup_experiment_save from JaxSeq.utils import BlockingStrategy, Padding, Truncation, get_weight_decay_mask, create_path, get_enabled_save_path, MapIterable, FileOpenIterable from JaxSeq.models.gptj.interface import GPTJInference from JaxSeq.models.gptj.load import load_train_state, ModelLoadMode from LLM_RL.algorithms.ppo.train import train_loop from LLM_RL.algorithms.ppo.base_interface import ppo_loss_fn, FixedKLController, AdaptiveKLController from transformers.generation import GenerationConfig from jaxtyping import PyTree from LLM_RL.environment import text_env_eval, TextTrajectory, TextTrajectoryChain, TokenTrajectory, text_history_to_str from LLM_RL.algorithms.ppo.gptj.interface import GPTJPPOPolicy, GPTJPPOInference, GPTJPPOTrain from LLM_RL.heads.linear_head import load_train_state_from_config as load_head_train_state_from_config from LLM_RL.heads.linear_head import LinearHeadConfig from JaxSeq.shard_model import shard_params_from_params from LLM_RL.algorithms.ppo.data import PPODataset from LLM_RL.utils import get_tensor_stats_np from functools import partial from JaxSeq.logs import label_logs, log, pull_logs from JaxSeq.utils import multihost_device_get from JaxSeq.data import MaskIterableDataset from llm_rl_scripts.wordle.env.env import ReformatWordleEnvironment, WordleEnvironment from llm_rl_scripts.wordle.env.game import Vocabulary from dataclasses import replace from JaxSeq.models.gptj.interface import loss_fn_mask import tyro import jax import jax.numpy as jnp import os import optax import pickle as pkl import re import numpy as np import json
16,580
force_pad_embeddings=force_pad_embeddings, params_dtype=jnp.float32, ) policy_model.config.gradient_checkpointing = gradient_checkpointing policy_model.config.gradient_checkpointing_policy = gradient_checkpointing_policy policy_model.config.resid_pdrop = 0.0 policy_model.config.embd_pdrop = 0.0 policy_model.config.attn_pdrop = 0.0 with jax.default_device(jax.devices('cpu')[0]): initital_policy_params = jax.tree_util.tree_map( lambda x: multihost_device_get(x, mesh=mesh).copy(), policy_train_state.params, ) initital_policy_params = shard_params_from_params( model=policy_model, params=initital_policy_params, ) loop_state = dict() if should_restore_loop_state and (model_load_mode in {ModelLoadMode.TRAIN_STATE, ModelLoadMode.TRAIN_STATE_PARAMS, ModelLoadMode.PARAMS}): with open(os.path.join(convert_path(model_load_path), 'loop_state.pkl'), 'rb') as f: loop_state = pkl.load(f) policy_inference = GPTJInference.load_inference( params=policy_train_state.params, model=policy_model, tokenizer=tokenizer, ) vocab = Vocabulary.from_file( vocab_file=vocab_file, fill_cache=False, ) env = ReformatWordleEnvironment(WordleEnvironment(vocab, require_words_in_vocab=True, bad_word_reward=-10.0)) policy_prng = jax.random.PRNGKey(0) policy = GPTJPPOPolicy( inference=policy_inference, prng_key=policy_prng, generation_config=GenerationConfig( do_sample=policy_do_sample, num_beams=policy_num_beams, temperature=policy_temperature, top_p=policy_top_p, top_k=policy_top_k, eos_token_id=tokenizer.encode('\n')[0], pad_token_id=tokenizer.pad_token_id, max_new_tokens=max_output_length, ), blocking_strategy=BlockingStrategy( padding=Padding.LEFT, truncation=Truncation.LEFT, max_length=max_input_length, ), out_str_process=lambda x: x.removesuffix('\n')+'\n', ) def value_head_optim_getter(params: PyTree): mask = get_weight_decay_mask(("bias",))(params) optim = optax.adamw( learning_rate=lr, b1=0.9, b2=0.95, eps=1e-8, weight_decay=weight_decay, mask=mask, ) if grad_accum_steps is not None: optim = optax.MultiSteps( optim, every_k_schedule=grad_accum_steps, ) return optim head_prng_key = jax.random.PRNGKey(3) value_head_train_state, value_head = load_head_train_state_from_config( model_config=LinearHeadConfig( input_dim=policy_model.config.n_embd, output_dim=1, use_bias=True, initializer_range=0.0, bias_init=-4.1, ), model_dtype=jnp.bfloat16 if bf16_activations else jnp.float32, optim_getter=value_head_optim_getter, mesh=mesh, prng_key=head_prng_key, pad_to_output_dim=None, params_dtype=jnp.float32, ) loss_f = partial(ppo_loss_fn, cliprange_value=cliprange_value, cliprange=cliprange, value_loss_coef=value_loss_coef) ppo_inference = GPTJPPOInference.load_inference( initial_policy_params=initital_policy_params, policy_params=policy_train_state.params, value_head_params=value_head_train_state.params, initial_policy_model=policy_model, policy_model=policy_model, value_head_model=value_head, tokenizer=tokenizer, loss_fn=loss_f, bc_loss_fn=loss_fn_mask, bc_loss_weight=bc_loss_weight, ) ppo_trainer = GPTJPPOTrain.load_train( policy_train_state=policy_train_state, value_head_train_state=value_head_train_state, policy_model=policy_model, value_head_model=value_head, tokenizer=tokenizer, loss_fn=loss_f, bc_loss_fn=loss_fn_mask, bc_loss_weight=bc_loss_weight, ) if use_adaptive_kl:
def main( model_load_mode: ModelLoadMode, model_load_path: str, bc_data_path: str, vocab_file: str, /, # Mark the end of positional arguments. exp_name: Optional[str]=None, outputs_path: Optional[str]=None, data_mesh_shape: int=1, fsdp_mesh_shape: int=1, model_mesh_shape: int=-1, use_wandb: bool=False, wandb_project: Optional[str]=None, n_rounds: int=1, epochs: int=1, max_steps: Optional[int]=None, lr: float=1e-5, weight_decay: float=0.0, train_bsize: int=32, train_bc_bsize: Optional[int]=None, grad_accum_steps: Optional[int]=None, rollout_bsize: int=32, n_rollouts: int=128, ppo_data_bsize: int=32, bf16_activations: bool=False, gradient_checkpointing: bool=False, gradient_checkpointing_policy: str='nothing_saveable', max_input_length: int=512, max_output_length: int=512, log_every: int=256, eval_every_steps: Optional[int]=None, eval_every_epochs: Optional[int]=None, eval_every_rounds: Optional[int]=None, eval_at_beginning: bool=False, eval_at_end: bool=True, save_every_steps: Optional[int]=None, save_every_epochs: Optional[int]=None, save_every_rounds: Optional[int]=None, save_at_beginning: bool=False, save_at_end: bool=False, save_best: bool=True, max_checkpoints: Optional[int]=None, save_train_state: bool=True, save_ppo_dataset: bool=True, save_bf16: bool=True, policy_do_sample: bool=True, policy_num_beams: int=1, policy_temperature: Optional[float]=None, policy_top_p: Optional[float]=None, policy_top_k: Optional[int]=None, gamma: float=1.0, lam: float=0.95, use_advantage_whitening: bool=True, init_kl_coef: float=0.001, kl_target: Optional[float]=None, kl_horizon: Optional[int]=None, cliprange_value: float=0.2, cliprange: float=0.2, value_loss_coef: float=1.0, bc_loss_weight: float=1.0, force_pad_embeddings: bool=False, should_restore_loop_state: bool=False, ): input_args = locals() print(input_args) use_adaptive_kl = (kl_target is not None and kl_horizon is not None) if not use_adaptive_kl: assert kl_target is None and kl_horizon is None tokenizer = AutoTokenizer.from_pretrained('EleutherAI/gpt-j-6B') tokenizer.add_special_tokens({'pad_token': '<|pad|>'}) mesh = load_mesh((data_mesh_shape, fsdp_mesh_shape, model_mesh_shape), ('dp', 'fsdp', 'mp')) is_main_process = jax.process_index() == 0 print(f"Mesh: {mesh}") print(f"Is main process: {is_main_process}") # load data bc_data = MaskIterableDataset.blocked_from_str_segments_iterable( MapIterable(lambda x: [(tokenizer.bos_token, 0.0)]+x['sequence']+[(tokenizer.eos_token, 1.0)], FileOpenIterable(convert_path(bc_data_path), 'r', pipe=jsonl_stream)), tokenizer, blocking_strategy=BlockingStrategy( padding=Padding.RIGHT, truncation=Truncation.LEFT, max_length=max_input_length+max_output_length, ), ) def policy_optim_getter(params: PyTree): mask = get_weight_decay_mask(( "".join([r"\['ln_[0-9]+'\]", re.escape("['bias']")]), "".join([r"\['ln_[0-9]+'\]", re.escape("['scale']")]), re.escape("['ln_f']['bias']"), re.escape("['ln_f']['scale']"), "bias", ))(params) optim = optax.adamw( learning_rate=lr, b1=0.9, b2=0.95, eps=1e-8, weight_decay=weight_decay, mask=mask, ) if grad_accum_steps is not None: optim = optax.MultiSteps( optim, every_k_schedule=grad_accum_steps, ) return optim model_prng_key = jax.random.PRNGKey(2) policy_train_state, policy_model = load_train_state( model_load_mode=model_load_mode, model_load_path=convert_path(model_load_path) if model_load_mode != ModelLoadMode.HF else model_load_path, model_dtype=jnp.bfloat16 if bf16_activations else jnp.float32, optim_getter=policy_optim_getter, tokenizer=tokenizer, mesh=mesh, 
prng_key=model_prng_key, force_pad_embeddings=force_pad_embeddings, params_dtype=jnp.float32, ) policy_model.config.gradient_checkpointing = gradient_checkpointing policy_model.config.gradient_checkpointing_policy = gradient_checkpointing_policy policy_model.config.resid_pdrop = 0.0 policy_model.config.embd_pdrop = 0.0 policy_model.config.attn_pdrop = 0.0 with jax.default_device(jax.devices('cpu')[0]): initital_policy_params = jax.tree_util.tree_map( lambda x: multihost_device_get(x, mesh=mesh).copy(), policy_train_state.params, ) initital_policy_params = shard_params_from_params( model=policy_model, params=initital_policy_params, ) loop_state = dict() if should_restore_loop_state and (model_load_mode in {ModelLoadMode.TRAIN_STATE, ModelLoadMode.TRAIN_STATE_PARAMS, ModelLoadMode.PARAMS}): with open(os.path.join(convert_path(model_load_path), 'loop_state.pkl'), 'rb') as f: loop_state = pkl.load(f) policy_inference = GPTJInference.load_inference( params=policy_train_state.params, model=policy_model, tokenizer=tokenizer, ) vocab = Vocabulary.from_file( vocab_file=vocab_file, fill_cache=False, ) env = ReformatWordleEnvironment(WordleEnvironment(vocab, require_words_in_vocab=True, bad_word_reward=-10.0)) policy_prng = jax.random.PRNGKey(0) policy = GPTJPPOPolicy( inference=policy_inference, prng_key=policy_prng, generation_config=GenerationConfig( do_sample=policy_do_sample, num_beams=policy_num_beams, temperature=policy_temperature, top_p=policy_top_p, top_k=policy_top_k, eos_token_id=tokenizer.encode('\n')[0], pad_token_id=tokenizer.pad_token_id, max_new_tokens=max_output_length, ), blocking_strategy=BlockingStrategy( padding=Padding.LEFT, truncation=Truncation.LEFT, max_length=max_input_length, ), out_str_process=lambda x: x.removesuffix('\n')+'\n', ) def value_head_optim_getter(params: PyTree): mask = get_weight_decay_mask(("bias",))(params) optim = optax.adamw( learning_rate=lr, b1=0.9, b2=0.95, eps=1e-8, weight_decay=weight_decay, mask=mask, ) if grad_accum_steps is not None: optim = optax.MultiSteps( optim, every_k_schedule=grad_accum_steps, ) return optim head_prng_key = jax.random.PRNGKey(3) value_head_train_state, value_head = load_head_train_state_from_config( model_config=LinearHeadConfig( input_dim=policy_model.config.n_embd, output_dim=1, use_bias=True, initializer_range=0.0, bias_init=-4.1, ), model_dtype=jnp.bfloat16 if bf16_activations else jnp.float32, optim_getter=value_head_optim_getter, mesh=mesh, prng_key=head_prng_key, pad_to_output_dim=None, params_dtype=jnp.float32, ) loss_f = partial(ppo_loss_fn, cliprange_value=cliprange_value, cliprange=cliprange, value_loss_coef=value_loss_coef) ppo_inference = GPTJPPOInference.load_inference( initial_policy_params=initital_policy_params, policy_params=policy_train_state.params, value_head_params=value_head_train_state.params, initial_policy_model=policy_model, policy_model=policy_model, value_head_model=value_head, tokenizer=tokenizer, loss_fn=loss_f, bc_loss_fn=loss_fn_mask, bc_loss_weight=bc_loss_weight, ) ppo_trainer = GPTJPPOTrain.load_train( policy_train_state=policy_train_state, value_head_train_state=value_head_train_state, policy_model=policy_model, value_head_model=value_head, tokenizer=tokenizer, loss_fn=loss_f, bc_loss_fn=loss_fn_mask, bc_loss_weight=bc_loss_weight, ) if use_adaptive_kl:
kl_controller = AdaptiveKLController(init_kl_coef=init_kl_coef, target=kl_target, horizon=kl_horizon)
3
2023-11-21 00:16:42+00:00
24k
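The PPO script above ends by constructing an `AdaptiveKLController(init_kl_coef, target, horizon)`. The record does not show its update rule, so the sketch below assumes the standard Ziegler et al. (2019) adaptive-KL update that this interface suggests; the actual LLM_RL implementation may differ:

```python
class AdaptiveKLControllerSketch:
    """Assumed Ziegler-style adaptive KL coefficient (not the repo's code)."""

    def __init__(self, init_kl_coef: float, target: float, horizon: int):
        self.value = init_kl_coef
        self.target = target
        self.horizon = horizon

    def update(self, current_kl: float, n_steps: int) -> None:
        # Proportional error, clipped so one bad batch cannot swing
        # the coefficient too hard; coefficient drifts toward target KL.
        error = min(max(current_kl / self.target - 1.0, -0.2), 0.2)
        self.value *= 1.0 + error * n_steps / self.horizon

ctl = AdaptiveKLControllerSketch(init_kl_coef=0.001, target=0.1, horizon=10000)
ctl.update(current_kl=0.2, n_steps=256)   # KL above target -> coefficient grows
print(ctl.value)
```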
jzmzhong/Automatic-Prosody-Annotator-with-SSWP-CLAP
src/clap_module/conformer/encoder.py
[ { "identifier": "ConvolutionModule", "path": "src/clap_module/conformer/convolution.py", "snippet": "class ConvolutionModule(nn.Module):\r\n \"\"\"ConvolutionModule in Conformer model.\r\n\r\n Args:\r\n channels (int): The number of channels of conv layers.\r\n kernel_size (int): Ker...
import logging import torch import math from .convolution import ConvolutionModule from .encoder_layer import EncoderLayer from .modules import get_activation from .modules import VGG2L from .modules import ( LegacyRelPositionMultiHeadedAttention, MultiHeadedAttention, RelPositionMultiHeadedAttention, ) from .embedding import ( LegacyRelPositionalEncoding, PositionalEncoding, RelPositionalEncoding, ScaledPositionalEncoding, ) from .modules import LayerNorm from .multi_layer_conv import ( Conv1dLinear, MultiLayeredConv1d, ) from .modules import ( PositionwiseFeedForward, ) from .modules import repeat from .sub_sampling import Conv2dSubsampling from ..feature_fusion import AttentionPool1d, DAF, AFF, iAFF
14,564
attention_dim (int): Dimension of attention. attention_heads (int): The number of heads of multi head attention. linear_units (int): The number of units of position-wise feed forward. num_blocks (int): The number of decoder blocks. dropout_rate (float): Dropout rate. positional_dropout_rate (float): Dropout rate after adding positional encoding. attention_dropout_rate (float): Dropout rate in attention. input_layer (Union[str, torch.nn.Module]): Input layer type. normalize_before (bool): Whether to use layer_norm before the first block. concat_after (bool): Whether to concat attention layer's input and output. if True, additional linear will be applied. i.e. x -> x + linear(concat(x, att(x))) if False, no additional linear will be applied. i.e. x -> x + att(x) positionwise_layer_type (str): "linear", "conv1d", or "conv1d-linear". positionwise_conv_kernel_size (int): Kernel size of positionwise conv1d layer. macaron_style (bool): Whether to use macaron style for positionwise layer. pos_enc_layer_type (str): Encoder positional encoding layer type. selfattention_layer_type (str): Encoder attention layer type. activation_type (str): Encoder activation function type. use_cnn_module (bool): Whether to use convolution module. zero_triu (bool): Whether to zero the upper triangular part of attention matrix. cnn_module_kernel (int): Kernerl size of convolution module. padding_idx (int): Padding idx for input_layer=embed. stochastic_depth_rate (float): Maximum probability to skip the encoder layer. intermediate_layers (Union[List[int], None]): indices of intermediate CTC layer. indices start from 1. if not None, intermediate outputs are returned (which changes return type signature.) """ def __init__( self, idim, attention_dim=256, attention_heads=4, linear_units=2048, num_blocks=6, dropout_rate=0.1, positional_dropout_rate=0.1, attention_dropout_rate=0.0, input_layer="conv2d", normalize_before=True, concat_after=False, ffn_layer_type="linear", ffn_conv_kernel_size=1, macaron_style=False, pos_enc_layer_type="abs_pos", selfattention_layer_type="selfattn", activation_type="relu", use_cnn_module=True, zero_triu=False, cnn_module_kernel=31, padding_idx=-1, stochastic_depth_rate=0.0, intermediate_layers=None, ctc_softmax=None, conditioning_layer_dim=None, max_seq_len=100, enable_fusion=False, fusion_type="", ): """Construct an Encoder object.""" super(Encoder, self).__init__() self.max_seq_len = max_seq_len activation = get_activation(activation_type) if pos_enc_layer_type == "abs_pos": pos_enc_class = PositionalEncoding elif pos_enc_layer_type == "scaled_abs_pos": pos_enc_class = ScaledPositionalEncoding elif pos_enc_layer_type == "rel_pos": assert selfattention_layer_type == "rel_selfattn" pos_enc_class = RelPositionalEncoding elif pos_enc_layer_type == "legacy_rel_pos": assert selfattention_layer_type == "legacy_rel_selfattn" pos_enc_class = LegacyRelPositionalEncoding else: raise ValueError("unknown pos_enc_layer: " + pos_enc_layer_type) self.conv_subsampling_factor = 1 if input_layer == "linear": self.embed = torch.nn.Sequential( torch.nn.Linear(idim, attention_dim), torch.nn.LayerNorm(attention_dim), torch.nn.Dropout(dropout_rate), pos_enc_class(attention_dim, positional_dropout_rate), ) elif input_layer == "conv2d": self.embed = Conv2dSubsampling( idim, attention_dim, dropout_rate, pos_enc_class(attention_dim, positional_dropout_rate), ) self.conv_subsampling_factor = 4 elif input_layer == "vgg2l": self.embed = VGG2L(idim, attention_dim) self.conv_subsampling_factor = 4 elif input_layer == "embed": 
self.embed = torch.nn.Sequential( torch.nn.Embedding(idim, attention_dim, padding_idx=padding_idx), pos_enc_class(attention_dim, positional_dropout_rate), ) elif isinstance(input_layer, torch.nn.Module): self.embed = torch.nn.Sequential( input_layer, pos_enc_class(attention_dim, positional_dropout_rate), ) elif input_layer is None: self.embed = torch.nn.Sequential( pos_enc_class(attention_dim, positional_dropout_rate) ) else: raise ValueError("unknown input_layer: " + input_layer) self.normalize_before = normalize_before # self-attention module definition if selfattention_layer_type == "selfattn": logging.info("encoder self-attention layer type = self-attention")
# Copyright 2020 Johns Hopkins University (Shinji Watanabe) # Northwestern Polytechnical University (Pengcheng Guo) # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) """Encoder definition.""" class Encoder(torch.nn.Module): """Conformer encoder module. Args: idim (int): Input dimension. attention_dim (int): Dimension of attention. attention_heads (int): The number of heads of multi head attention. linear_units (int): The number of units of position-wise feed forward. num_blocks (int): The number of decoder blocks. dropout_rate (float): Dropout rate. positional_dropout_rate (float): Dropout rate after adding positional encoding. attention_dropout_rate (float): Dropout rate in attention. input_layer (Union[str, torch.nn.Module]): Input layer type. normalize_before (bool): Whether to use layer_norm before the first block. concat_after (bool): Whether to concat attention layer's input and output. if True, additional linear will be applied. i.e. x -> x + linear(concat(x, att(x))) if False, no additional linear will be applied. i.e. x -> x + att(x) positionwise_layer_type (str): "linear", "conv1d", or "conv1d-linear". positionwise_conv_kernel_size (int): Kernel size of positionwise conv1d layer. macaron_style (bool): Whether to use macaron style for positionwise layer. pos_enc_layer_type (str): Encoder positional encoding layer type. selfattention_layer_type (str): Encoder attention layer type. activation_type (str): Encoder activation function type. use_cnn_module (bool): Whether to use convolution module. zero_triu (bool): Whether to zero the upper triangular part of attention matrix. cnn_module_kernel (int): Kernerl size of convolution module. padding_idx (int): Padding idx for input_layer=embed. stochastic_depth_rate (float): Maximum probability to skip the encoder layer. intermediate_layers (Union[List[int], None]): indices of intermediate CTC layer. indices start from 1. if not None, intermediate outputs are returned (which changes return type signature.) 
""" def __init__( self, idim, attention_dim=256, attention_heads=4, linear_units=2048, num_blocks=6, dropout_rate=0.1, positional_dropout_rate=0.1, attention_dropout_rate=0.0, input_layer="conv2d", normalize_before=True, concat_after=False, ffn_layer_type="linear", ffn_conv_kernel_size=1, macaron_style=False, pos_enc_layer_type="abs_pos", selfattention_layer_type="selfattn", activation_type="relu", use_cnn_module=True, zero_triu=False, cnn_module_kernel=31, padding_idx=-1, stochastic_depth_rate=0.0, intermediate_layers=None, ctc_softmax=None, conditioning_layer_dim=None, max_seq_len=100, enable_fusion=False, fusion_type="", ): """Construct an Encoder object.""" super(Encoder, self).__init__() self.max_seq_len = max_seq_len activation = get_activation(activation_type) if pos_enc_layer_type == "abs_pos": pos_enc_class = PositionalEncoding elif pos_enc_layer_type == "scaled_abs_pos": pos_enc_class = ScaledPositionalEncoding elif pos_enc_layer_type == "rel_pos": assert selfattention_layer_type == "rel_selfattn" pos_enc_class = RelPositionalEncoding elif pos_enc_layer_type == "legacy_rel_pos": assert selfattention_layer_type == "legacy_rel_selfattn" pos_enc_class = LegacyRelPositionalEncoding else: raise ValueError("unknown pos_enc_layer: " + pos_enc_layer_type) self.conv_subsampling_factor = 1 if input_layer == "linear": self.embed = torch.nn.Sequential( torch.nn.Linear(idim, attention_dim), torch.nn.LayerNorm(attention_dim), torch.nn.Dropout(dropout_rate), pos_enc_class(attention_dim, positional_dropout_rate), ) elif input_layer == "conv2d": self.embed = Conv2dSubsampling( idim, attention_dim, dropout_rate, pos_enc_class(attention_dim, positional_dropout_rate), ) self.conv_subsampling_factor = 4 elif input_layer == "vgg2l": self.embed = VGG2L(idim, attention_dim) self.conv_subsampling_factor = 4 elif input_layer == "embed": self.embed = torch.nn.Sequential( torch.nn.Embedding(idim, attention_dim, padding_idx=padding_idx), pos_enc_class(attention_dim, positional_dropout_rate), ) elif isinstance(input_layer, torch.nn.Module): self.embed = torch.nn.Sequential( input_layer, pos_enc_class(attention_dim, positional_dropout_rate), ) elif input_layer is None: self.embed = torch.nn.Sequential( pos_enc_class(attention_dim, positional_dropout_rate) ) else: raise ValueError("unknown input_layer: " + input_layer) self.normalize_before = normalize_before # self-attention module definition if selfattention_layer_type == "selfattn": logging.info("encoder self-attention layer type = self-attention")
encoder_selfattn_layer = MultiHeadedAttention
5
2023-11-25 02:38:32+00:00
24k
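The Conformer `Encoder` constructor above hard-asserts that the positional-encoding type matches the self-attention type (`rel_pos` requires `rel_selfattn`, `legacy_rel_pos` requires `legacy_rel_selfattn`). A standalone sketch of just that pairing check, extracted from the constructor logic (the helper name is illustrative):

```python
# Pairings enforced by the Encoder constructor; absolute encodings are
# conventionally paired with plain self-attention but not asserted.
REL_PAIRINGS = {
    "rel_pos": "rel_selfattn",
    "legacy_rel_pos": "legacy_rel_selfattn",
}
KNOWN = {"abs_pos", "scaled_abs_pos", *REL_PAIRINGS}

def check_pairing(pos_enc: str, self_attn: str) -> None:
    if pos_enc not in KNOWN:
        raise ValueError("unknown pos_enc_layer: " + pos_enc)
    if pos_enc in REL_PAIRINGS and REL_PAIRINGS[pos_enc] != self_attn:
        raise AssertionError(f"{pos_enc} requires {REL_PAIRINGS[pos_enc]}")

check_pairing("rel_pos", "rel_selfattn")   # ok; mismatches raise
```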
Luo-Z13/pointobb
PointOBB/mmdet/models/roi_heads/PointOBB_head.py
[ { "identifier": "HEADS", "path": "PointOBB/mmdet/models/builder.py", "snippet": "HEADS = MODELS" }, { "identifier": "MODELS", "path": "PointOBB/mmdet/models/builder.py", "snippet": "MODELS = Registry('models', parent=MMCV_MODELS)" }, { "identifier": "build_head", "path": "Poi...
import math import torch import torch.nn.functional as F import torch.nn as nn import copy import numpy as np import cv2 from mmdet.core import bbox2result, bbox2roi, rbbox2roi, build_assigner, build_sampler, multi_apply from ..builder import HEADS, MODELS, build_head, build_roi_extractor, build_loss from .standard_roi_head import StandardRoIHead from .cascade_roi_head import CascadeRoIHead from mmdet.core.bbox.iou_calculators import bbox_overlaps from .test_mixins import BBoxTestMixin, MaskTestMixin from mmdet.core.bbox import bbox_xyxy_to_cxcywh from mmdet.core.bbox.transforms import rbbox2result from mmcv.cnn import Scale, ConvModule from mmcv.ops import box_iou_rotated from typing import Any, List, Sequence, Tuple, Union from torch import Tensor from mmdet.models.utils.base_bbox_coder import BaseBBoxCoder from ..detectors.utils import obb2xyxy, regularize_boxes, reduce_mean, obb2poly_np
16,504
bbox_results = self._bbox_forward(x, rois) outs = outs + (bbox_results['cls_score'], bbox_results['bbox_pred']) return outs def grid_priors(self, featmap_sizes: List[Tuple], dtype: torch.dtype = torch.float32, device = 'cuda', with_stride: bool = False): num_levels = len(self.featmap_strides) assert num_levels == len(featmap_sizes) multi_level_priors = [] for i in range(num_levels): priors = self.single_level_grid_priors( featmap_sizes[i], level_idx=i, dtype=dtype, device=device, with_stride=with_stride) multi_level_priors.append(priors) return multi_level_priors def single_level_grid_priors(self, featmap_size: Tuple[int], level_idx: int, dtype: torch.dtype = torch.float32, device = 'cuda', offset = 0.5, with_stride: bool = False) -> Tensor: feat_h, feat_w = featmap_size stride_w = self.featmap_strides[level_idx] stride_h = stride_w shift_x = ((torch.arange(0, feat_w, device=device) + offset) * stride_w).to(dtype) shift_y = ((torch.arange(0, feat_h, device=device) + offset) * stride_h).to(dtype) shift_xx, shift_yy = meshgrid(shift_x, shift_y) if not with_stride: shifts = torch.stack([shift_xx, shift_yy], dim=-1) else: stride_w = shift_xx.new_full((shift_xx.shape[0], ), stride_w).to(dtype) stride_h = shift_xx.new_full((shift_yy.shape[0], ), stride_h).to(dtype) shifts = torch.stack([shift_xx, shift_yy, stride_w, stride_h], dim=-1) all_points = shifts.to(device) return all_points def get_targets(self, x, points, gt_points, proposals, gt_labels, img_metas): self.norm_on_bbox = True num_levels = len(x) concat_points = torch.cat(points, dim=0) # the number of points per img, per lvl num_points = [center.size(0) for center in points] labels_list, angle_targets_list, id_targets_list = multi_apply( self._get_targets_single, gt_points, proposals, gt_labels, img_metas, points=concat_points, num_points_per_lvl=num_points) # split to per img, per level labels_list = [labels.split(num_points, 0) for labels in labels_list] angle_targets_list = [ angle_targets.split(num_points, 0) for angle_targets in angle_targets_list ] id_targets_list = [ id_targets.split(num_points, 0) for id_targets in id_targets_list ] # concat per level image concat_lvl_labels = [] concat_lvl_angle_targets = [] concat_lvl_id_targets = [] for i in range(num_levels): concat_lvl_labels.append( torch.cat([labels[i] for labels in labels_list])) # bbox_targets = torch.cat( # [bbox_targets[i] for bbox_targets in bbox_targets_list]) angle_targets = torch.cat( [angle_targets[i] for angle_targets in angle_targets_list]) id_targets = torch.cat( [id_targets[i] for id_targets in id_targets_list]) concat_lvl_angle_targets.append(angle_targets) concat_lvl_id_targets.append(id_targets) return (concat_lvl_labels, concat_lvl_angle_targets, concat_lvl_id_targets) def _get_targets_single( self, gt_points, proposals, gt_label, img_meta, points, num_points_per_lvl: List[int]) -> Tuple[Tensor, Tensor, Tensor]: """Compute regression and classification targets for a single image.""" self.center_sampling = True self.center_sample_radius = 1.5 self.pseudow = 3 self.pseudoh = 2 num_points = points.size(0) num_gts = len(gt_points) gt_labels = gt_label gt_bid = img_meta['gt_bid'] gen_proposals = proposals.reshape(len(gt_points), -1, proposals.size(-1)) if gt_points.size(-1) == 2: extra_tensor = torch.tensor([self.pseudow, self.pseudoh, gen_proposals[0,0,-1]], device=gt_points.device, dtype=gt_points.dtype).repeat(len(gt_points), 1) gt_bboxes = torch.cat((gt_points, extra_tensor), dim=1) else: gt_bboxes = gt_points.clone() if num_gts == 0: return 
gt_labels.new_full((num_points,), self.num_classes), \ gt_bboxes.new_zeros((num_points, 1)), \ gt_bboxes.new_zeros((num_points,)) areas = (gt_bboxes[:,2] * gt_bboxes[:,3]).squeeze()
RangeType = Sequence[Tuple[int, int]] INF = 1e8 def meshgrid(x: Tensor, y: Tensor, row_major: bool = True) -> Tuple[Tensor, Tensor]: yy, xx = torch.meshgrid(y, x) if row_major: # warning .flatten() would cause error in ONNX exportingF # have to use reshape here return xx.reshape(-1), yy.reshape(-1) else: return yy.reshape(-1), xx.reshape(-1) def obb2cxcywh_le90(obboxes): """Convert oriented bounding boxes to horizontal bounding boxes. Args: obbs (torch.Tensor): [x_ctr,y_ctr,w,h,angle] Returns: hbbs (torch.Tensor): [x_lt,y_lt,x_rb,y_rb] """ center, w, h, theta = torch.split(obboxes, [2, 1, 1, 1], dim=-1) Cos, Sin = torch.cos(theta), torch.sin(theta) x_bias = torch.abs(w / 2 * Cos) + torch.abs(h / 2 * Sin) y_bias = torch.abs(w / 2 * Sin) + torch.abs(h / 2 * Cos) bias = torch.cat([x_bias, y_bias], dim=-1) wh = bias * 2 return torch.cat([center, wh, torch.zeros_like(theta)], dim=-1) @HEADS.register_module() class PSCCoder(BaseBBoxCoder): """Phase-Shifting Coder. `Phase-Shifting Coder (PSC) <https://arxiv.org/abs/2211.06368>`. Args: angle_version (str): Angle definition. Only 'le90' is supported at present. dual_freq (bool, optional): Use dual frequency. Default: True. num_step (int, optional): Number of phase steps. Default: 3. thr_mod (float): Threshold of modulation. Default: 0.47. """ def __init__(self, angle_version: str, dual_freq: bool = True, num_step: int = 3, thr_mod: float = 0.47): super().__init__() self.angle_version = angle_version assert angle_version in ['le90'] self.dual_freq = dual_freq self.num_step = num_step self.thr_mod = thr_mod if self.dual_freq: self.encode_size = 2 * self.num_step else: self.encode_size = self.num_step self.coef_sin = torch.tensor( tuple( torch.sin(torch.tensor(2 * k * math.pi / self.num_step)) for k in range(self.num_step))) self.coef_cos = torch.tensor( tuple( torch.cos(torch.tensor(2 * k * math.pi / self.num_step)) for k in range(self.num_step))) def encode(self, angle_targets: Tensor) -> Tensor: """Phase-Shifting Encoder. Args: angle_targets (Tensor): Angle offset for each scale level. Has shape (num_anchors * H * W, 1) Returns: list[Tensor]: The psc coded data (phase-shifting patterns) for each scale level. Has shape (num_anchors * H * W, encode_size) """ phase_targets = angle_targets * 2 phase_shift_targets = tuple( torch.cos(phase_targets + 2 * math.pi * x / self.num_step) for x in range(self.num_step)) # Dual-freq PSC for square-like problem if self.dual_freq: phase_targets = angle_targets * 4 phase_shift_targets += tuple( torch.cos(phase_targets + 2 * math.pi * x / self.num_step) for x in range(self.num_step)) return torch.cat(phase_shift_targets, axis=-1) def decode(self, angle_preds: Tensor, keepdim: bool = False) -> Tensor: """Phase-Shifting Decoder. Args: angle_preds (Tensor): The psc coded data (phase-shifting patterns) for each scale level. Has shape (num_anchors * H * W, encode_size) keepdim (bool): Whether the output tensor has dim retained or not. Returns: list[Tensor]: Angle offset for each scale level. 
Has shape (num_anchors * H * W, 1) when keepdim is true, (num_anchors * H * W) otherwise """ self.coef_sin = self.coef_sin.to(angle_preds) self.coef_cos = self.coef_cos.to(angle_preds) phase_sin = torch.sum( angle_preds[:, 0:self.num_step] * self.coef_sin, dim=-1, keepdim=keepdim) phase_cos = torch.sum( angle_preds[:, 0:self.num_step] * self.coef_cos, dim=-1, keepdim=keepdim) phase_mod = phase_cos**2 + phase_sin**2 phase = -torch.atan2(phase_sin, phase_cos) # In range [-pi,pi) if self.dual_freq: phase_sin = torch.sum( angle_preds[:, self.num_step:(2 * self.num_step)] * self.coef_sin, dim=-1, keepdim=keepdim) phase_cos = torch.sum( angle_preds[:, self.num_step:(2 * self.num_step)] * self.coef_cos, dim=-1, keepdim=keepdim) phase_mod = phase_cos**2 + phase_sin**2 phase2 = -torch.atan2(phase_sin, phase_cos) / 2 # Phase unwarpping, dual freq mixing # Angle between phase and phase2 is obtuse angle idx = torch.cos(phase) * torch.cos(phase2) + torch.sin( phase) * torch.sin(phase2) < 0 # Add pi to phase2 and keep it in range [-pi,pi) phase2[idx] = phase2[idx] % (2 * math.pi) - math.pi phase = phase2 # Set the angle of isotropic objects to zero phase[phase_mod < self.thr_mod] *= 0 angle_pred = phase / 2 return angle_pred @HEADS.register_module() class PointOBBHead(StandardRoIHead): """Simplest base roi head including one bbox head and one mask head.""" def __init__(self, bbox_roi_extractor, num_stages, bbox_head, top_k=7, with_atten=None, conv_cfg=None, norm_cfg=None, scale_angle: bool = True, stacked_convs = 4, loss_symmetry_ss=dict( type='SmoothL1Loss', loss_weight=1.0, beta=0.1), angle_coder=dict( type='PSCCoder', angle_version='le90', dual_freq=False, num_step=3, thr_mod=0), angle_version = 'le90', use_angle_loss = True, add_angle_pred_begin = False, not_use_rot_mil = False, detach_angle_head = False, rotation_agnostic_classes = None, agnostic_resize_classes = None, cls_scores_weight = 1.0, ins_scores_weight = 1.0, **kwargs): super(PointOBBHead, self).__init__(bbox_roi_extractor=bbox_roi_extractor, bbox_head=bbox_head, **kwargs) self.threshold = 0.3 self.merge_mode = 'weighted_clsins' self.test_mean_iou = False # self.test_mean_iou = True self.sum_iou = 0 self.sum_num = 0 self.num_stages = num_stages self.topk1 = top_k # 7 self.topk2 = top_k # 7 self.featmap_strides = bbox_roi_extractor.featmap_strides self.with_atten = with_atten self.conv_cfg = conv_cfg self.norm_cfg = norm_cfg self.in_channels=256 self.feat_channels=256 self.stacked_convs=stacked_convs self.is_scale_angle = scale_angle self.angle_coder = HEADS.build(angle_coder) self.loss_symmetry_ss = build_loss(loss_symmetry_ss) self.angle_version = angle_version self.rotation_agnostic_classes = rotation_agnostic_classes self.agnostic_resize_classes = agnostic_resize_classes self.add_angle_pred_begin = add_angle_pred_begin self.use_angle_loss = use_angle_loss self.not_use_rot_mil = not_use_rot_mil self.detach_angle_head = detach_angle_head self.cls_scores_weight = cls_scores_weight self.ins_scores_weight = ins_scores_weight self.num_classes = self.bbox_head.num_classes self._init_layers() def _init_layers(self): """Initialize layers of the head.""" self.relu = nn.ReLU(inplace=True) self.cls_convs = nn.ModuleList() for i in range(self.stacked_convs): chn = self.in_channels if i == 0 else self.feat_channels self.cls_convs.append( ConvModule( chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) self.conv_angle = nn.Conv2d( self.feat_channels, self.angle_coder.encode_size, 3, padding=1) if 
self.is_scale_angle: self.scale_angle = Scale(1.0) def angle_forward(self, feats: Tuple[Tensor]) -> Tuple[List[Tensor], List[Tensor]]: angle_results = [] for feat in feats: if self.detach_angle_head: feat_detach = feat.clone().detach() single_angle_pred = self.angle_forward_single(feat_detach) else: single_angle_pred = self.angle_forward_single(feat) angle_results.append(single_angle_pred) return tuple(angle_results) def angle_forward_single(self, x: Tensor): cls_feat = x for cls_layer in self.cls_convs: cls_feat = cls_layer(cls_feat) # cls_score = self.conv_cls(cls_feat) angle_pred = self.conv_angle(cls_feat) if self.is_scale_angle: angle_pred = self.scale_angle(angle_pred).float() return angle_pred def init_assigner_sampler(self): """Initialize assigner and sampler.""" self.bbox_assigner = None self.bbox_sampler = None if self.train_cfg: self.bbox_assigner = build_assigner(self.train_cfg.assigner) self.bbox_sampler = build_sampler( self.train_cfg.sampler, context=self) def init_bbox_head(self, bbox_roi_extractor, bbox_head): """Initialize ``bbox_head``""" self.bbox_roi_extractor = build_roi_extractor(bbox_roi_extractor) # self.cdb = build_head(dict(type='ConvConcreteDB', cfg=None, planes=256)) self.bbox_head = build_head(bbox_head) def init_mask_head(self, mask_roi_extractor, mask_head): """Initialize ``mask_head``""" if mask_roi_extractor is not None: self.mask_roi_extractor = build_roi_extractor(mask_roi_extractor) self.share_roi_extractor = False else: self.share_roi_extractor = True self.mask_roi_extractor = self.bbox_roi_extractor self.mask_head = build_head(mask_head) def forward_dummy(self, x, proposals): """Dummy forward function.""" # bbox head outs = () rois = bbox2roi([proposals]) if self.with_bbox: bbox_results = self._bbox_forward(x, rois) outs = outs + (bbox_results['cls_score'], bbox_results['bbox_pred']) return outs def grid_priors(self, featmap_sizes: List[Tuple], dtype: torch.dtype = torch.float32, device = 'cuda', with_stride: bool = False): num_levels = len(self.featmap_strides) assert num_levels == len(featmap_sizes) multi_level_priors = [] for i in range(num_levels): priors = self.single_level_grid_priors( featmap_sizes[i], level_idx=i, dtype=dtype, device=device, with_stride=with_stride) multi_level_priors.append(priors) return multi_level_priors def single_level_grid_priors(self, featmap_size: Tuple[int], level_idx: int, dtype: torch.dtype = torch.float32, device = 'cuda', offset = 0.5, with_stride: bool = False) -> Tensor: feat_h, feat_w = featmap_size stride_w = self.featmap_strides[level_idx] stride_h = stride_w shift_x = ((torch.arange(0, feat_w, device=device) + offset) * stride_w).to(dtype) shift_y = ((torch.arange(0, feat_h, device=device) + offset) * stride_h).to(dtype) shift_xx, shift_yy = meshgrid(shift_x, shift_y) if not with_stride: shifts = torch.stack([shift_xx, shift_yy], dim=-1) else: stride_w = shift_xx.new_full((shift_xx.shape[0], ), stride_w).to(dtype) stride_h = shift_xx.new_full((shift_yy.shape[0], ), stride_h).to(dtype) shifts = torch.stack([shift_xx, shift_yy, stride_w, stride_h], dim=-1) all_points = shifts.to(device) return all_points def get_targets(self, x, points, gt_points, proposals, gt_labels, img_metas): self.norm_on_bbox = True num_levels = len(x) concat_points = torch.cat(points, dim=0) # the number of points per img, per lvl num_points = [center.size(0) for center in points] labels_list, angle_targets_list, id_targets_list = multi_apply( self._get_targets_single, gt_points, proposals, gt_labels, img_metas, points=concat_points, 
num_points_per_lvl=num_points) # split to per img, per level labels_list = [labels.split(num_points, 0) for labels in labels_list] angle_targets_list = [ angle_targets.split(num_points, 0) for angle_targets in angle_targets_list ] id_targets_list = [ id_targets.split(num_points, 0) for id_targets in id_targets_list ] # concat per level image concat_lvl_labels = [] concat_lvl_angle_targets = [] concat_lvl_id_targets = [] for i in range(num_levels): concat_lvl_labels.append( torch.cat([labels[i] for labels in labels_list])) # bbox_targets = torch.cat( # [bbox_targets[i] for bbox_targets in bbox_targets_list]) angle_targets = torch.cat( [angle_targets[i] for angle_targets in angle_targets_list]) id_targets = torch.cat( [id_targets[i] for id_targets in id_targets_list]) concat_lvl_angle_targets.append(angle_targets) concat_lvl_id_targets.append(id_targets) return (concat_lvl_labels, concat_lvl_angle_targets, concat_lvl_id_targets) def _get_targets_single( self, gt_points, proposals, gt_label, img_meta, points, num_points_per_lvl: List[int]) -> Tuple[Tensor, Tensor, Tensor]: """Compute regression and classification targets for a single image.""" self.center_sampling = True self.center_sample_radius = 1.5 self.pseudow = 3 self.pseudoh = 2 num_points = points.size(0) num_gts = len(gt_points) gt_labels = gt_label gt_bid = img_meta['gt_bid'] gen_proposals = proposals.reshape(len(gt_points), -1, proposals.size(-1)) if gt_points.size(-1) == 2: extra_tensor = torch.tensor([self.pseudow, self.pseudoh, gen_proposals[0,0,-1]], device=gt_points.device, dtype=gt_points.dtype).repeat(len(gt_points), 1) gt_bboxes = torch.cat((gt_points, extra_tensor), dim=1) else: gt_bboxes = gt_points.clone() if num_gts == 0: return gt_labels.new_full((num_points,), self.num_classes), \ gt_bboxes.new_zeros((num_points, 1)), \ gt_bboxes.new_zeros((num_points,)) areas = (gt_bboxes[:,2] * gt_bboxes[:,3]).squeeze()
gt_bboxes = regularize_boxes(gt_bboxes, pattern=self.angle_version)
10
2023-11-20 07:50:12+00:00
24k
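The `PSCCoder` in this record encodes an angle as cosine samples of a doubled phase and decodes it with an arctangent over sin/cos-weighted sums. A minimal single-frequency round trip mirroring the record's formulas (the dual-frequency branch and the modulation threshold are omitted; function names are illustrative):

```python
import math
import torch

def psc_encode(theta: torch.Tensor, num_step: int = 3) -> torch.Tensor:
    # theta in [-pi/2, pi/2); doubling maps it onto a full period.
    phase = 2.0 * theta
    return torch.stack(
        [torch.cos(phase + 2 * math.pi * k / num_step) for k in range(num_step)],
        dim=-1,
    )

def psc_decode(codes: torch.Tensor, num_step: int = 3) -> torch.Tensor:
    k = torch.arange(num_step, dtype=codes.dtype)
    coef_sin = torch.sin(2 * math.pi * k / num_step)
    coef_cos = torch.cos(2 * math.pi * k / num_step)
    # Weighted sums isolate sin/cos of the doubled phase; atan2 recovers it.
    phase = -torch.atan2((codes * coef_sin).sum(-1), (codes * coef_cos).sum(-1))
    return phase / 2.0

theta = torch.tensor([-1.2, 0.0, 0.7, 1.5])   # radians, within the le90 range
print(psc_decode(psc_encode(theta)))           # recovers theta
```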
ModelTC/EasyLLM
llm/models/hf_models/qwen_vl/modeling_qwen.py
[ { "identifier": "QWenConfig", "path": "llm/models/hf_models/qwen_vl/configuration_qwen.py", "snippet": "class QWenConfig(PretrainedConfig):\n model_type = \"qwen\"\n keys_to_ignore_at_inference = [\"past_key_values\"]\n\n def __init__(\n self,\n vocab_size=151936,\n hidden_...
import importlib import math import torch # noqa import torch.nn.functional as F # noqa import torch.utils.checkpoint # noqa from typing import TYPE_CHECKING, Dict, Optional, Tuple, Union, Callable, List, Any, Generator # noqa from torch.cuda.amp import autocast # noqa from torch.nn import CrossEntropyLoss from transformers import PreTrainedTokenizer, GenerationConfig, StoppingCriteriaList # noqa from transformers.generation.logits_process import LogitsProcessorList # noqa from transformers.generation.streamers import BaseStreamer # noqa from transformers.generation.utils import GenerateOutput # noqa from transformers.modeling_outputs import ( BaseModelOutputWithPast, CausalLMOutputWithPast, ) from transformers.modeling_utils import PreTrainedModel # noqa from transformers.utils import logging from einops import rearrange from torch import nn from .configuration_qwen import QWenConfig # noqa from .qwen_generation_utils import ( make_context, ) # noqa from llm.models.hf_models.qwen.qwen_generation_utils import ( HistoryType, decode_tokens, get_stop_words_ids, ) from .visual import VisionTransformer from llm.models.hf_models.qwen.modeling_qwen import RMSNorm, apply_rotary_pos_emb, QWenMLP from llm.models.hf_models.qwen.modeling_qwen import QWenAttention as QWenAttention_chat from llm.models.hf_models.qwen.modeling_qwen import QWenModel as QWenModel_chat from llm.models.hf_models.qwen.modeling_qwen import QWenLMHeadModel as QWenLMHeadModel_chat from einops import rearrange
15,993
input_shape[-1] + past_length, dtype=torch.long, device=device, ) position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1]) encoder_attention_mask = None head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) if inputs_embeds is None: inputs_embeds = self.wte(input_ids) if batch_size <= 0: raise ValueError("batch_size has to be defined and > 0") attention_mask = self._prepare_decoder_attention_mask( attention_mask, input_shape, inputs_embeds, past_length ) hidden_states = inputs_embeds kv_seq_len = hidden_states.size()[1] if past_key_values[0] is not None: # past key values[0][0] shape: bs * seq_len * head_num * dim kv_seq_len += past_key_values[0][0].shape[1] if ( self.use_dynamic_ntk and kv_seq_len == hidden_states.size()[1] and not self.training ): context_value = math.log(kv_seq_len / self.seq_length, 2) + 1 ntk_alpha = 2 ** math.ceil(context_value) - 1 ntk_alpha = max(ntk_alpha, 1) else: ntk_alpha = self.rotary_emb._ntk_alpha_cached rotary_pos_emb = self.rotary_emb(kv_seq_len, ntk_alpha=ntk_alpha) for idx in range(len(rotary_pos_emb)): rotary_pos_emb[idx] = rotary_pos_emb[idx].to(hidden_states.device) hidden_states = self.drop(hidden_states).clone() if fake_images is not None: hidden_states = hidden_states + images.mean() * 0 elif images is not None: for idx, (i, a, b) in enumerate(img_pos): hidden_states[i][a + 1 : b] = images[idx] output_shape = input_shape + (hidden_states.size(-1),) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False presents = () if use_cache else None all_self_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): # None for past_key_value return module(*inputs, use_cache, output_attentions) return custom_forward outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(block), hidden_states, rotary_pos_emb, self.registered_causal_mask, None, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask, ) else: outputs = block( hidden_states, layer_past=layer_past, rotary_pos_emb=rotary_pos_emb, registered_causal_mask=self.registered_causal_mask, attention_mask=attention_mask, head_mask=head_mask[i], encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = outputs[0] if use_cache is True: presents = presents + (outputs[1],) if output_attentions: all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) hidden_states = self.ln_f(hidden_states) hidden_states = hidden_states.view(output_shape) # Add last hidden state if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, presents, all_hidden_states] if v is not None ) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_self_attentions, )
# Copyright (c) Alibaba Cloud. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. if TYPE_CHECKING: try: except ImportError: rearrange = None SUPPORT_CUDA = torch.cuda.is_available() SUPPORT_BF16 = SUPPORT_CUDA and torch.cuda.is_bf16_supported() SUPPORT_FP16 = SUPPORT_CUDA and torch.cuda.get_device_capability(0)[0] >= 7 logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "qwen" _CONFIG_FOR_DOC = "QWenConfig" QWen_PRETRAINED_MODEL_ARCHIVE_LIST = ["qwen-7b"] _ERROR_BAD_CHAT_FORMAT = """\ We detect you are probably using the pretrained model (rather than chat model) for chatting, since the chat_format in generation_config is not "chatml". If you are directly using the model downloaded from Huggingface, please make sure you are using our "Qwen/Qwen-7B-Chat" Huggingface model (rather than "Qwen/Qwen-7B") when you call model.chat(). 我们检测到您可能在使用预训练模型(而非chat模型)进行多轮chat,因为您当前在generation_config指定的chat_format,并未设置为我们在对话中所支持的"chatml"格式。 如果您在直接使用我们从Huggingface提供的模型,请确保您在调用model.chat()时,使用的是"Qwen/Qwen-7B-Chat"模型(而非"Qwen/Qwen-7B"预训练模型)。 """ _SENTINEL = object() _ERROR_STREAM_IN_CHAT = """\ Pass argument `stream` to model.chat() is buggy, deprecated, and marked for removal. Please use model.chat_stream(...) instead of model.chat(..., stream=True). 向model.chat()传入参数stream的用法可能存在Bug,该用法已被废弃,将在未来被移除。请使用model.chat_stream(...)代替model.chat(..., stream=True)。 """ apply_rotary_emb_func = None rms_norm = None # Copied from transformers.models.bart.modeling_bart._make_causal_mask def _make_causal_mask( input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 ): """ Make causal mask used for bi-directional self-attention. """ bsz, tgt_len = input_ids_shape mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) mask_cond = torch.arange(mask.size(-1), device=device) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) mask = mask.to(dtype) if past_key_values_length > 0: mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) # Copied from transformers.models.bart.modeling_bart._expand_mask def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
""" bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) class QWenAttention(QWenAttention_chat): def __init__(self, config): super().__init__(config) def _attn(self, query, key, value, registered_causal_mask, attention_mask=None, head_mask=None): attn_weights = torch.matmul(query, key.transpose(-1, -2)) if self.scale_attn_weights: attn_weights = attn_weights / torch.full( [], value.size(-1) ** 0.5, dtype=attn_weights.dtype, device=attn_weights.device, ) attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) attn_weights = attn_weights.type(value.dtype) attn_weights = self.attn_dropout(attn_weights) if head_mask is not None: attn_weights = attn_weights * head_mask attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2) return attn_output, attn_weights def forward( self, hidden_states: Optional[Tuple[torch.FloatTensor]], rotary_pos_emb: Optional[List[torch.Tensor]] = None, registered_causal_mask: Optional[torch.Tensor] = None, layer_past: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, ): mixed_x_layer = self.c_attn(hidden_states) query, key, value = mixed_x_layer.split(self.split_size, dim=2) query = self._split_heads(query, self.num_heads, self.head_dim) key = self._split_heads(key, self.num_heads, self.head_dim) value = self._split_heads(value, self.num_heads, self.head_dim) if rotary_pos_emb is not None: cur_len = query.shape[1] rotary_pos_emb = [i[:, -cur_len:, :, :] for i in rotary_pos_emb] rotary_pos_emb = (rotary_pos_emb,) * 2 q_pos_emb, k_pos_emb = rotary_pos_emb # Slice the pos emb for current inference query = apply_rotary_pos_emb(query, q_pos_emb) key = apply_rotary_pos_emb(key, k_pos_emb) if layer_past is not None: past_key, past_value = layer_past[0], layer_past[1] key = torch.cat((past_key, key), dim=1) value = torch.cat((past_value, value), dim=1) if use_cache: present = (key, value) else: present = None if self.use_logn_attn and not self.training: if self.logn_tensor.device != query.device or self.logn_tensor.dtype != query.dtype: self.logn_tensor = self.logn_tensor.to(query.device).type_as(query) seq_start = key.size(1) - query.size(1) seq_end = key.size(1) logn_tensor = self.logn_tensor[:, seq_start:seq_end, :, :] query = query * logn_tensor.expand_as(query) query = query.permute(0, 2, 1, 3) key = key.permute(0, 2, 1, 3) value = value.permute(0, 2, 1, 3) attn_output, attn_weight = self._attn( query, key, value, registered_causal_mask, attention_mask, head_mask ) context_layer = self._merge_heads( attn_output, self.num_heads, self.head_dim ) attn_output = self.c_proj(context_layer) outputs = (attn_output, present) if output_attentions: outputs += (attn_weight,) return outputs class QWenBlock(nn.Module): def __init__(self, config): super().__init__() hidden_size = config.hidden_size self.bf16 = config.bf16 self.ln_1 = RMSNorm( hidden_size, eps=config.layer_norm_epsilon, ) self.attn = QWenAttention(config) self.ln_2 = RMSNorm( hidden_size, eps=config.layer_norm_epsilon, ) self.mlp = 
QWenMLP(config) def forward( self, hidden_states: Optional[Tuple[torch.FloatTensor]], rotary_pos_emb: Optional[List[torch.Tensor]] = None, registered_causal_mask: Optional[torch.Tensor] = None, layer_past: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = False, output_attentions: Optional[bool] = False, ): layernorm_output = self.ln_1(hidden_states) attn_outputs = self.attn( layernorm_output, rotary_pos_emb, registered_causal_mask=registered_causal_mask, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, ) attn_output = attn_outputs[0] outputs = attn_outputs[1:] residual = hidden_states layernorm_input = attn_output + residual layernorm_output = self.ln_2(layernorm_input) residual = layernorm_input mlp_output = self.mlp(layernorm_output) hidden_states = residual + mlp_output if use_cache: outputs = (hidden_states,) + outputs else: outputs = (hidden_states,) + outputs[1:] return outputs class QWenModel(QWenModel_chat): _keys_to_ignore_on_load_missing = ["attn.masked_bias"] def __init__(self, config): super().__init__(config) dim = ( self.rotary_ndims if self.rotary_ndims is not None else config.kv_channels ) self.rotary_emb = RotaryEmbedding(dim, base=config.rotary_emb_base) self.registered_causal_mask = None self.h = nn.ModuleList( [ QWenBlock( config ) for i in range(config.num_hidden_layers) ] ) self.visual = VisionTransformer(**config.visual) def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length): combined_attention_mask = None if input_shape[-1] > 1: combined_attention_mask = _make_causal_mask( input_shape, inputs_embeds.dtype, device=inputs_embeds.device, past_key_values_length=past_key_values_length, ) if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( inputs_embeds.device ) combined_attention_mask = ( expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask ) return combined_attention_mask def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): if past_key_values is None and torch.any(input_ids == self.config.visual['image_start_id']): bos_pos = torch.where(input_ids == self.config.visual['image_start_id']) eos_pos = torch.where(input_ids == self.config.visual['image_start_id'] + 1) assert (bos_pos[0] == eos_pos[0]).all() img_pos = torch.stack((bos_pos[0], bos_pos[1], eos_pos[1]), dim=1) images = [] for i, a, b in img_pos: image = input_ids[i][a + 1 : b - 1].tolist() image = image[: image.index(self.config.visual['image_start_id'] + 2)] 
images.append(bytes(image).decode('utf-8')) images = self.visual.encode(images) assert images.shape[0] == len(images) fake_images = None elif self.training: fake_images = torch.zeros(1, 3, 224, 224).to( dtype=self.visual.conv1.weight.dtype, device=self.visual.conv1.weight.device) images = self.visual(fake_images) else: fake_images = None images = None output_attentions = ( output_attentions if output_attentions is not None else self.config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) if input_ids is not None and inputs_embeds is not None: raise ValueError( "You cannot specify both input_ids and inputs_embeds at the same time" ) elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) batch_size = input_ids.shape[0] elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] batch_size = inputs_embeds.shape[0] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if token_type_ids is not None: token_type_ids = token_type_ids.view(-1, input_shape[-1]) if position_ids is not None: position_ids = position_ids.view(-1, input_shape[-1]) if past_key_values is None: past_length = 0 past_key_values = tuple([None] * len(self.h)) else: past_length = past_key_values[0][0].size(-2) if position_ids is None: position_ids = torch.arange( past_length, input_shape[-1] + past_length, dtype=torch.long, device=device, ) position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1]) encoder_attention_mask = None head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) if inputs_embeds is None: inputs_embeds = self.wte(input_ids) if batch_size <= 0: raise ValueError("batch_size has to be defined and > 0") attention_mask = self._prepare_decoder_attention_mask( attention_mask, input_shape, inputs_embeds, past_length ) hidden_states = inputs_embeds kv_seq_len = hidden_states.size()[1] if past_key_values[0] is not None: # past key values[0][0] shape: bs * seq_len * head_num * dim kv_seq_len += past_key_values[0][0].shape[1] if ( self.use_dynamic_ntk and kv_seq_len == hidden_states.size()[1] and not self.training ): context_value = math.log(kv_seq_len / self.seq_length, 2) + 1 ntk_alpha = 2 ** math.ceil(context_value) - 1 ntk_alpha = max(ntk_alpha, 1) else: ntk_alpha = self.rotary_emb._ntk_alpha_cached rotary_pos_emb = self.rotary_emb(kv_seq_len, ntk_alpha=ntk_alpha) for idx in range(len(rotary_pos_emb)): rotary_pos_emb[idx] = rotary_pos_emb[idx].to(hidden_states.device) hidden_states = self.drop(hidden_states).clone() if fake_images is not None: hidden_states = hidden_states + images.mean() * 0 elif images is not None: for idx, (i, a, b) in enumerate(img_pos): hidden_states[i][a + 1 : b] = images[idx] output_shape = input_shape + (hidden_states.size(-1),) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False presents = () if use_cache else None all_self_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): # None for past_key_value return module(*inputs, use_cache, output_attentions) return custom_forward outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(block), hidden_states, rotary_pos_emb, self.registered_causal_mask, None, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask, ) else: outputs = block( hidden_states, layer_past=layer_past, rotary_pos_emb=rotary_pos_emb, registered_causal_mask=self.registered_causal_mask, attention_mask=attention_mask, head_mask=head_mask[i], encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = outputs[0] if use_cache is True: presents = presents + (outputs[1],) if output_attentions: all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) hidden_states = self.ln_f(hidden_states) hidden_states = hidden_states.view(output_shape) # Add last hidden state if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, presents, all_hidden_states] if v is not None ) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_self_attentions, )
class QWenLMHeadModel(QWenLMHeadModel_chat):
6
2023-11-26 10:12:52+00:00
24k
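The Qwen record above leans on two pieces of machinery worth isolating: the additive causal mask built by _make_causal_mask, and the dynamic-NTK alpha schedule that stretches the rotary embeddings once the context exceeds the training length. Below is a minimal, self-contained sketch of both, assuming only PyTorch; the function names and the train_seq_len parameter are illustrative stand-ins, not the dataset's code.

import math
import torch

def causal_bias(tgt_len: int, past_len: int = 0, dtype=torch.float32):
    # Additive attention bias: 0 where attention is allowed, dtype-min elsewhere.
    mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min)
    idx = torch.arange(tgt_len)
    mask.masked_fill_(idx < (idx + 1).view(tgt_len, 1), 0)  # lower triangle -> allowed
    if past_len > 0:
        # Every query position may attend to all cached (past) positions.
        mask = torch.cat([torch.zeros(tgt_len, past_len), mask], dim=-1)
    return mask.to(dtype)[None, None, :, :]  # shape [1, 1, tgt_len, past_len + tgt_len]

def dynamic_ntk_alpha(kv_seq_len: int, train_seq_len: int) -> float:
    # Same schedule as the record: alpha stays at 1 inside the training window,
    # then jumps to 2**ceil(log2(len / train) + 1) - 1 as the context grows.
    context_value = math.log(kv_seq_len / train_seq_len, 2) + 1
    return max(2 ** math.ceil(context_value) - 1, 1)

print(causal_bias(3, past_len=2)[0, 0])   # 3x5 mask; the two past columns are all zero
print(dynamic_ntk_alpha(16384, 8192))     # -> 3

The stepwise schedule means alpha only changes at power-of-two multiples of the training length, so cached rotary tables (the _ntk_alpha_cached path in the record) stay valid between jumps.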
wangermeng2021/llm-webui
main.py
[ { "identifier": "login_huggingface", "path": "src/utils/common.py", "snippet": "def login_huggingface(token,base_model_name_dropdown):\n if base_model_name_dropdown.lower().find(\"llama\") >= 0:\n if token:\n HUGGINGFACE_HUB_TOKEN = token\n print(\"d1:\",HUGGINGFACE_HUB_T...
import pandas as pd import math import numpy as np import gc import os,requests import subprocess,threading import time import gradio as gr import os import traceback import numpy as np import glob import shutil import torch import socket from src.utils.common import login_huggingface from src.finetune.huggingface_inference import HuggingfaceInference from src.finetune.llama_cpp_inference import LlamaCppInference from src.rag.qa_with_rag import QAWithRAG from src.utils.common import read_yaml,get_first_row_from_dataset,\ get_runs_model_names_from_dir,get_hg_model_names_from_dir,get_hg_model_names_and_gguf_from_dir,validate_model_path,get_runs_models from src.utils.chat_prompts import get_model_type,get_chat_history_prompt,get_model_prompt_template from transformers.training_args import OptimizerNames from huggingface_hub import hf_hub_download from src.utils import download_model from pathlib import Path from src.finetune.qlora_trainer import QloraTrainer from src.finetune.qlora_trainer import TRAINING_STATUS from src.utils.download_huggingface_repo import download_model_wrapper,download_dataset_wrapper
14,501
gr.Markdown("## &nbsp;Setting", elem_classes="white_background") with gr.Group(): with gr.Group(): gr.Markdown("### &nbsp;&nbsp;1.Chunking", elem_classes="white_background") with gr.Row(): text_splitter_dropdown = gr.Dropdown(["RecursiveCharacterTextSplitter"], label=f"Text Splitter", value="RecursiveCharacterTextSplitter", interactive=True, scale=1, min_width=1) with gr.Row(): chunk_size_slider = gr.Slider(32, 1024, value=256, step=32, label="Chunk Size", interactive=True, scale=1) chunk_overlap_slider = gr.Slider(0, 500, value=20, step=10, label="Chunk Overlap", interactive=True) Separators_textbox = gr.Textbox(label="Separators", value='''["\n\n", "\n", ".", " ", ""]''', interactive=True,visible=False) with gr.Group(): gr.Markdown("### &nbsp;&nbsp;2.Vector Store Retriever", elem_classes="white_background") # local_embedding_model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),"rag","embedding_models") local_embedding_model_names = get_hg_model_names_from_dir(local_embedding_model_dir,"embedding_models") embedding_model_source_radio_choices = ["Download From Huggingface Hub", f"From Local Dir(hg format:{local_embedding_model_dir})"] embedding_model_source_radio = gr.Radio(embedding_model_source_radio_choices, label="Embedding Model Source", value=embedding_model_source_radio_choices[0], interactive=True) with gr.Row(): hub_embedding_model_names_dropdown = gr.Dropdown(embedding_model_names, label=f"",show_label=False, value=embedding_model_names[0] if embedding_model_names else None, interactive=True, scale=4, min_width=1) download_hub_embedding_model_names_btn = gr.Button("Download", scale=1) stop_download_hub_embedding_model_names_btn = gr.Button("Stop", scale=1, visible=False) local_embedding_model_names_dropdown = gr.Dropdown(local_embedding_model_names, label=f"Embedding Model",show_label=False, value=local_embedding_model_names[0] if local_embedding_model_names else None, interactive=True, scale=4, min_width=1,visible=False) refresh_local_embedding_model_names_btn = gr.Button("Refresh", scale=1,visible=False) # model_config_path1 = os.path.join(local_embedding_model_dir, # embedding_model_names[0], "pytorch_model.bin") # model_config_path2 = os.path.join(local_embedding_model_dir, # embedding_model_names[0], "model.safetensors") model_config_path = os.path.join(local_embedding_model_dir, embedding_model_names[0], "config.json") if os.path.exists(model_config_path): download_hub_embedding_model_status_markdown = gr.Markdown( '<span style="color:green">&nbsp;&nbsp;&nbsp;&nbsp;This model has already been downloaded to local.</span>') else: download_hub_embedding_model_status_markdown = gr.Markdown( '<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;This model has not been downloaded.</span>') with gr.Row(): search_top_k_slider = gr.Slider(1, 10, value=3, step=1, label="Search Top K", interactive=True) search_score_threshold_slider = gr.Slider(0, 1, value=0.5, step=0.1, label="Search Score Threshold",interactive=True) with gr.Group(): gr.Markdown("### &nbsp;&nbsp;3.Chat Model", elem_classes="white_background") local_chat_model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),"models") runs_model_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "runs") # local_chat_model_names = get_hg_model_names_from_dir(local_chat_model_dir) local_chat_model_names = get_hg_model_names_and_gguf_from_dir(local_chat_model_dir,runs_model_root_dir) chat_model_source_radio_choices = ["Download From Huggingface Hub", f"From Local Dir(hg 
format:{local_chat_model_dir})"] chat_model_source_radio = gr.Radio(chat_model_source_radio_choices, label="Chat Model source",show_label=False, value=chat_model_source_radio_choices[0], interactive=True) with gr.Row(): hub_chat_model_names_dropdown = gr.Dropdown(base_model_names, label=f"Chat Model",show_label=False,allow_custom_value=True, value=base_model_names[0] if base_model_names else None, interactive=True, scale=4, min_width=1) download_hub_chat_model_names_btn = gr.Button("Download", scale=1) stop_download_hub_chat_model_names_btn = gr.Button("Stop", scale=1, visible=False) local_chat_model_names_dropdown = gr.Dropdown(local_chat_model_names, label=f"Chat Model",show_label=False, value=local_chat_model_names[0] if local_chat_model_names else None, interactive=True, scale=4, min_width=1,visible=False) refresh_local_chat_model_names_btn = gr.Button("Refresh", scale=1,visible=False) rag_using_4bit_quantization_checkbox = gr.Checkbox(True, label="Using 4-bit quantization", interactive=True, visible=True, info="Less memory but slower", scale=1 ) if validate_model_path(base_model_names[0])[0]: download_hub_chat_model_status_markdown = gr.Markdown( '<span style="color:green">&nbsp;&nbsp;&nbsp;&nbsp;This model has already been downloaded to local.</span>') else: download_hub_chat_model_status_markdown = gr.Markdown( '<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;This model has not been downloaded.</span>') with gr.Tab("Setting"): # with gr.Column(scale=4, min_width=1): with gr.Group(): gr.Markdown("## &nbsp;Setting", elem_classes="white_background") with gr.Group(): with gr.Row(): max_new_tokens_slider = gr.Slider(1, 4096, value=256, step=0.1, label="Max New Tokens", interactive=True) temperature_slider = gr.Slider(0, 5, value=1, step=0.1, label="Temperature", interactive=True) with gr.Row(): top_k_slider = gr.Slider(1, 100, value=50, step=1, label="Top_k", interactive=True) top_p_slider = gr.Slider(0, 1, value=1, step=0.1, label="Top_p", interactive=True) with gr.Row(): repeat_penalty_slider = gr.Slider(1, 5, value=1, step=0.1, label="Repeat Penalty", interactive=True) with gr.Row(): chat_history_window_slider = gr.Slider(1, 20, value=3, step=1, label="Chat History Window", interactive=True) low_cpu_mem_usage_checkbox = gr.Checkbox(False, label="Low Cpu Mem Usage",interactive=True,visible=False) Huggingface_hub_token = gr.Textbox(label="Huggingface Hub Token", value="") def check_local_model_or_dataset_is_empty1(base_model_name_dropdown,Huggingface_hub_token): if len(base_model_name_dropdown.strip()) == 0: raise gr.Error("Name is empty!") try:
# os.environ['HTTP_PROXY'] = 'http://127.0.0.1:8889' # os.environ['HTTPS_PROXY'] = 'http://127.0.0.1:8889' LOCAL_HOST_IP = "0.0.0.0" TENSORBOARD_URL = "http://" + LOCAL_HOST_IP + ":6006/" INIT_DATASET_NAME = "test_python_code_instructions_5000_rows" RAG_DATA_LIST_DROPDOWN = "" TEXT_SPLITTER_DROPDOWN = "" CHUNK_SIZE_SLIDER = 0 CHUNK_OVERLAP_SLIDER = -1 SEPARATORS_TEXTBOX = "" EMBEDDING_MODEL_SOURCE_RADIO = "" HUB_EMBEDDING_MODEL_NAMES_DROPDOWN = "" LOCAL_EMBEDDING_MODEL_NAMES_DROPDOWN = "" CHAT_MODEL_SOURCE_RADIO = "" HUB_CHAT_MODEL_NAMES_DROPDOWN = "" LOCAL_CHAT_MODEL_NAMES_DROPDOWN = "" SEARCH_TOP_K_SLIDER = "" SEARCH_SCORE_THRESHOLD_SLIDER = "" training_ret_val = -1 error_msg = "" current_running_model_name = "" infer_model = None stop_generation_status = False chatbot_history=[] chatbot_height = 500 rag_chatbot_history=[] rag_stop_generation_status = False qa_with_rag = QAWithRAG() train_param_config = {} train_param_config["dataset"]={} train_param_config["model"]={} train_param_config["training"]={} model_zoo_config = {} transformer_optimizer_list = [] model_context_window = 0 init_train_file_path = None init_val_file_path = None INIT_PREFIX1 = "" INIT_PREFIX2 = "" INIT_PREFIX3 = "" INIT_PREFIX4 = "" INIT_COL1_TEXT = "" INIT_COL2_TEXT = "" INIT_COL3_TEXT = "" INIT_COL4_TEXT = "" col_names = [] DATASET_FIRST_ROW = None local_model_list = "" local_model_root_dir = "" base_model_names = [] training_base_model_names = [] embedding_model_names = [] base_model_context_window = [] local_dataset_list = [] local_dataset_root_dir = "" def get_local_embedding_model_list(): local_model_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "rag", "embedding_models") local_model_root_files = os.listdir(local_model_root_dir) local_model_list = [] for model_dir in local_model_root_files: if os.path.isdir(os.path.join(local_model_root_dir, model_dir)): local_model_list.append(model_dir) return local_model_list,local_model_root_dir def get_local_model_list(): local_model_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models") local_model_root_files = os.listdir(local_model_root_dir) local_model_list = [] for model_dir in local_model_root_files: if os.path.isdir(os.path.join(local_model_root_dir, model_dir)): local_model_list.append(model_dir) return local_model_list,local_model_root_dir def get_local_dataset_list(): local_dataset_list = [] local_dataset_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "datasets") matched_dataset_file_path_list = glob.glob(os.path.join(local_dataset_root_dir,"**","dataset_infos.json"),recursive=False) for matched_file_path in matched_dataset_file_path_list: matched_pos1 = matched_file_path.rfind("datasets") matched_pos2 = matched_file_path.rfind("dataset_infos.json") local_dataset_list.append(matched_file_path[matched_pos1 + 9:matched_pos2-1]) matched_dataset_file_path_list = glob.glob(os.path.join(local_dataset_root_dir,"**","dataset_dict.json"),recursive=False) for matched_file_path in matched_dataset_file_path_list: matched_pos1 = matched_file_path.rfind("datasets") matched_pos2 = matched_file_path.rfind("dataset_dict.json") local_dataset_list.append(matched_file_path[matched_pos1 + 9:matched_pos2-1]) return local_dataset_list,local_dataset_root_dir def start_tensorboard_server(): try: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((LOCAL_HOST_IP, 6006)) s.close() except Exception as e: tensorboard_cmd = f"tensorboard --logdir {os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs')} 
--reload_multifile True" tensorboard_proc = subprocess.Popen(tensorboard_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, close_fds=True) # bufsize=0, close_fds=True def init(): global config_dict,transformer_optimizer_list,model_context_window,init_train_file_path,init_val_file_path global INIT_PREFIX1,INIT_COL1_TEXT,INIT_PREFIX2,INIT_COL2_TEXT,INIT_PREFIX3,INIT_COL3_TEXT,INIT_PREFIX4,INIT_COL4_TEXT,col_names,DATASET_FIRST_ROW global local_model_list,local_model_root_dir global base_model_names,base_model_context_window,embedding_model_names,training_base_model_names global local_dataset_list, local_dataset_root_dir start_tensorboard_server() model_zoo_config = read_yaml(os.path.join(os.path.dirname(os.path.abspath(__file__)),"config","model_zoo.yaml")) transformer_optimizer_list = list(vars(OptimizerNames)["_value2member_map_"].keys()) #get dynamic context window from selected model model_context_window = [2048,1024,512] init_train_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "datasets", INIT_DATASET_NAME) DATASET_FIRST_ROW,split_list = get_first_row_from_dataset(init_train_file_path) col_names = list(DATASET_FIRST_ROW) col_names.insert(0,"") INIT_PREFIX1 = "<s>[INST] " INIT_PREFIX2 = "here are the inputs " INIT_PREFIX3 = " [/INST]" INIT_PREFIX4 = "</s>" INIT_COL1_TEXT = str(DATASET_FIRST_ROW[col_names[1]]) INIT_COL2_TEXT = str(DATASET_FIRST_ROW[col_names[2]]) INIT_COL3_TEXT = str(DATASET_FIRST_ROW[col_names[3]]) INIT_COL4_TEXT = "" local_model_list,local_model_root_dir = get_local_model_list() base_model_names = [model_name for model_name in model_zoo_config["model_list"]] training_base_model_names = [model_name for model_name in base_model_names if not model_name.endswith(".gguf")] # base_model_context_window = [model_name[1] for model_name in model_zoo_config["model_list"]] embedding_model_names = [model_name for model_name in model_zoo_config["embedding_model_list"]] local_dataset_list, local_dataset_root_dir = get_local_dataset_list() with gr.Blocks(title="FINETUNE",css="#vertical_center_align_markdown { position:absolute; top:30%;background-color:white;} .white_background {background-color: #ffffff} .none_border {border: none;border-collapse:collapse;}") as demo: init() local_model_root_dir_textbox = gr.Textbox(label="", value=local_model_root_dir, visible=False) local_dataset_root_dir_textbox = gr.Textbox(label="",value=local_dataset_root_dir, visible=False) local_embedding_model_root_dir_textbox = gr.Textbox(label="", value=os.path.join(os.path.dirname(os.path.abspath(__file__)), "rag", "embedding_models"), visible=False) local_chat_model_root_dir_textbox = gr.Textbox(label="", value=local_model_root_dir, visible=False) local_home_chat_model_root_dir_textbox = gr.Textbox(label="", value=local_model_root_dir, visible=False) session_state = gr.State(value={}) # html = gr.HTML("<p align='center';>llm-web-ui</p>",elem_id="header") with gr.Tab("Home"): with gr.Row(): # with gr.Column(scale=4, min_width=1): with gr.Group(): gr.Markdown("## &nbsp;ChatBot", elem_classes="white_background") with gr.Group(): gr.Markdown("### &nbsp;&nbsp;&nbsp;&nbsp;Chat Model", elem_classes="white_background") local_home_chat_model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models") runs_model_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "runs") local_home_chat_model_names = get_hg_model_names_and_gguf_from_dir(local_home_chat_model_dir, runs_model_root_dir) home_chat_model_source_radio_choices = ["Download From 
Huggingface Hub", f"From Local Dir(hg format:{local_home_chat_model_dir})"] home_chat_model_source_radio = gr.Radio(home_chat_model_source_radio_choices, label="Chat Model source", show_label=False, value=home_chat_model_source_radio_choices[0], interactive=True) with gr.Row(): hub_home_chat_model_names_dropdown = gr.Dropdown(base_model_names, label=f"Chat Model", show_label=False, allow_custom_value=True, value=base_model_names[ 0] if base_model_names else None, interactive=True, scale=4, min_width=1) local_home_chat_model_names_dropdown = gr.Dropdown(local_home_chat_model_names, label=f"Chat Model", show_label=False, value=local_home_chat_model_names[ 0] if local_home_chat_model_names else None, interactive=True, scale=4, min_width=1, visible=False) download_hub_home_chat_model_names_btn = gr.Button("Download", scale=1) stop_download_hub_home_chat_model_names_btn = gr.Button("Stop", scale=1, visible=False) refresh_local_home_chat_model_names_btn = gr.Button("Refresh", scale=1, visible=False) load_home_chat_model_btn = gr.Button("Load Model", scale=1, visible=True) using_4bit_quantization_checkbox = gr.Checkbox(True, label="Using 4-bit quantization", interactive=True, visible=True, info="Less memory but slower", scale=1 ) if validate_model_path(base_model_names[0])[0]: download_hub_home_chat_model_status_markdown = gr.Markdown( '<span style="color:green">&nbsp;&nbsp;&nbsp;&nbsp;This model has already been downloaded to local,click load model to run.</span>') else: download_hub_home_chat_model_status_markdown = gr.Markdown( '<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;This model has not been downloaded.</span>') # home_chat_model_running_status_markdown = gr.Markdown( # '<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;This model has not been downloaded.</span>') with gr.Row(): chatbot = gr.Chatbot(value=[],bubble_full_width=False,rtl=False,layout="panel",height=chatbot_height, avatar_images=((os.path.join(os.path.abspath(''),"pics", "user1.png")), (os.path.join(os.path.abspath(''),"pics", "bot4.png"))), ) with gr.Row(): input_txtbox = gr.Textbox( show_label=False,autofocus=True, placeholder="Enter text and press enter",scale=3 ) generate_btn = gr.Button("Generate", scale=1) stop_btn = gr.Button("Stop", scale=1) # clear_btn = gr.Button("Clear",scale=1) with gr.Tab("Fine-Tuning"): with gr.Tabs() as tensorboard_tab: with gr.TabItem("Training", id=0): with gr.Row(): with gr.Column(scale=1, min_width=1): with gr.Group(): gr.Markdown("## &nbsp;1.Training", elem_classes="white_background") with gr.Group(): gr.Markdown("### &nbsp;1).Model", elem_classes="white_background") with gr.Group(): # gr.Markdown("<br> &nbsp;&nbsp;&nbsp; Base Model") base_model_source_radio_choices = ["Download From Huggingface Hub", f"From Local Dir(hg format:{local_model_root_dir})"] base_model_source_radio = gr.Radio(base_model_source_radio_choices, label="Base Model", value=base_model_source_radio_choices[0], interactive=True) with gr.Row(elem_classes="white_background"): base_model_name_dropdown = gr.Dropdown(training_base_model_names, label="Model Name", value=training_base_model_names[0] if training_base_model_names else None, interactive=True, visible=True, scale=5, allow_custom_value=True) download_local_model_btn = gr.Button("Download", scale=1, visible=True) stop_download_local_model_btn = gr.Button("Stop", scale=1, visible=False) # model_download_status = gr.Markdown("<div id='vertical_center_align_markdown'><p style='text-align: center;'>Not downloaded</p></div>", 
elem_classes="white_background",scale=1,full_width=True,visible=False) if validate_model_path(training_base_model_names[0])[0]: download_model_status_markdown = gr.Markdown('<span style="color:green">&nbsp;&nbsp;&nbsp;&nbsp;This model has already been downloaded to local.</span>') else: download_model_status_markdown = gr.Markdown('<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;This model has not been downloaded.</span>') with gr.Row(): # local_home_chat_model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models") # runs_model_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "runs") # local_model_list = get_hg_model_names_and_gguf_from_dir(local_home_chat_model_dir,runs_model_root_dir) local_model_list = get_hg_model_names_from_dir(os.path.dirname(os.path.abspath(__file__)), "models") local_model_dropdown = gr.Dropdown(local_model_list, label="Local Model", info="", value=local_model_list[0] if len(local_model_list) > 0 else None, interactive=True, elem_classes="white_background", scale=5, visible=False) refresh_local_model_list_btn = gr.Button("Refresh", scale=1, visible=False) fine_tuning_type_dropdown = gr.Dropdown(["QLoRA", "LoRA"], label="Fine-Tuning Type", info="", value="QLoRA", interactive=True) with gr.Group(): with gr.Row(elem_classes="white_background"): # gr.Markdown("### &nbsp;&nbsp;&nbsp; LoRA Config", elem_classes="white_background") lora_r_list = [str(ri) for ri in range(8, 65, 8)] lora_r_slider = gr.Slider(8, 64, value=8, step=8, label="lora_r", interactive=True) # lora_r_dropdown = gr.Dropdown(lora_r_list,label="lora_r", value=lora_r_list[0],interactive=True,allow_custom_value=True) lora_alpha_slider = gr.Slider(8, 96, value=32, step=8, label="lora_alpha", interactive=True) # lora_alpha_list = [str(ri) for ri in range(8, 97, 8)] # lora_alpha_dropdown = gr.Dropdown(lora_alpha_list,label="lora_alpha", value=lora_alpha_list[3],interactive=True,allow_custom_value=True) with gr.Row(elem_classes="white_background"): lora_dropout_slider = gr.Slider(0, 1, value=0.05, step=0.01, label="lora_dropout", interactive=True) lora_bias_dropdown = gr.Dropdown(["none", "all", "lora_only"], label="lora_bias", info="", value="none", interactive=True) with gr.Group(): gr.Markdown("### &nbsp;2).Dataset",elem_classes="white_background") dataset_source_radio_choices = ["Download From Huggingface Hub", f"From Local HG Dataset In {local_dataset_root_dir})"] dataset_source_radio = gr.Radio(dataset_source_radio_choices, label="Dataset Source", value=dataset_source_radio_choices[1], interactive=True) with gr.Row(equal_height=True): hg_dataset_path_textbox = gr.Textbox(label="Dataset Name:",elem_classes="none_border",visible=False, interactive=True, scale=4, value="iamtarun/python_code_instructions_18k_alpaca") download_local_dataset_btn = gr.Button("Download", scale=1, visible=False) stop_download_local_dataset_btn = gr.Button("Stop", scale=1, visible=False) download_dataset_status_markdown = gr.Markdown('') with gr.Row(): hg_train_dataset_dropdown = gr.Dropdown(["train"], label="Train set", info="", interactive=False,visible=False, elem_classes="white_background", scale=1,value="train") hg_val_dataset_dropdown = gr.Dropdown([], label="Val set", info="", interactive=False,visible=False, elem_classes="white_background", scale=1) with gr.Row(): local_dataset_list.pop( local_dataset_list.index(INIT_DATASET_NAME)) local_dataset_list.insert(0, INIT_DATASET_NAME) local_train_path_dataset_dropdown = gr.Dropdown(local_dataset_list, label="Train Dataset", info="", 
value=local_dataset_list[0] if len(local_dataset_list)>0 else None, interactive=True, elem_classes="white_background", scale=5, visible=True) refresh_local_train_path_dataset_list_btn = gr.Button("Refresh", scale=1, visible=True) with gr.Row(): local_train_dataset_dropdown = gr.Dropdown(["train"], label="Train set", info="", interactive=True, elem_classes="white_background", scale=1,value="train",visible=True) local_val_dataset_dropdown = gr.Dropdown([], label="Val set", info="", interactive=True, elem_classes="white_background", scale=1,visible=True) with gr.Group(elem_classes="white_background"): # gr.Markdown("<h4><br> &nbsp;&nbsp;Prompt Template: (Prefix1 + ColumnName1 + Prefix2 + ColumnName2)</h4>",elem_classes="white_background") gr.Markdown("<br> &nbsp;&nbsp;&nbsp;&nbsp;**Prompt Template: (Prefix1+ColumnName1+Prefix2+ColumnName2+Prefix3+ColumnName3+Prefix4+ColumnName4)**",elem_classes="white_background") gr.Markdown( "<span> &nbsp;&nbsp;&nbsp;&nbsp;**Note**:&nbsp;&nbsp;Llama2/Mistral Chat Template:<s\>[INST] instruction+input [/INST] output</s\> </span>",elem_classes="white_background") # using_llama2_chat_template_checkbox = gr.Checkbox(True, label="Using Llama2/Mistral chat template",interactive=True,visible=False) with gr.Row(elem_classes="white_background"): # prompt_template prefix1_textbox = gr.Textbox(label="Prefix1:",value=INIT_PREFIX1,lines=2,interactive=True,elem_classes="white_background") datatset_col1_dropdown = gr.Dropdown(col_names, label="ColumnName1:", info="",value=col_names[1],interactive=True,elem_classes="white_background") prefix2_textbox = gr.Textbox(label="Prefix2:",value=INIT_PREFIX2,lines=2,interactive=True,elem_classes="white_background") datatset_col2_dropdown = gr.Dropdown(col_names, label="ColumnName2:", info="",value=col_names[2],interactive=True,elem_classes="white_background") with gr.Row(elem_classes="white_background"): prefix3_textbox = gr.Textbox(label="Prefix3:",value=INIT_PREFIX3,lines=2,interactive=True,elem_classes="white_background") datatset_col3_dropdown = gr.Dropdown(col_names, label="ColumnName3:", info="",value=col_names[3],interactive=True,elem_classes="white_background") prefix4_textbox = gr.Textbox(label="Prefix4:",value=INIT_PREFIX4,lines=2,interactive=True,elem_classes="white_background") datatset_col4_dropdown = gr.Dropdown(col_names, label="ColumnName4:", info="",value=col_names[0],interactive=True,elem_classes="white_background") # print("") prompt_sample = INIT_PREFIX1 + INIT_COL1_TEXT + INIT_PREFIX2 + INIT_COL2_TEXT + INIT_PREFIX3 + INIT_COL3_TEXT + INIT_PREFIX4 + INIT_COL4_TEXT prompt_sample_textbox = gr.Textbox(label="Prompt Sample:",interactive=False,value=prompt_sample,lines=4) max_length_dropdown = gr.Dropdown(["Model Max Length"]+model_context_window, label="Max Length",value="Model Max Length", interactive=True,allow_custom_value=True) with gr.Group(): gr.Markdown("### &nbsp;3).Training Arguments",elem_classes="white_background") with gr.Row(elem_classes="white_background"): epochs_slider = gr.Slider(1, 100, value=10, step=1, label="Epochs", interactive=True) # epochs_dropdown = gr.Dropdown([1]+[bi for bi in range(10,101,10)], label="Epochs",value=1, interactive=True,allow_custom_value=True) batch_size_list = [1,2,3]+[bi for bi in range(4,32+1,4)] batch_size_slider = gr.Slider(1, 100, value=1, step=1, label="Batch Size", interactive=True) # batch_size_dropdown = gr.Dropdown(batch_size_list,label="Batch Size", info="",value=batch_size_list[0],interactive=True,allow_custom_value=True) # learning_rate_textbox = 
gr.Textbox(label="Learning Rate", value=2e-4,interactive=True) with gr.Row(elem_classes="white_background"): learning_rate_slider = gr.Slider(0, 0.01, value=2e-4, step=0.0001, label="Learning Rate", interactive=True) warmup_steps_slider = gr.Slider(0, 400, value=100, step=10, label="Warmup Steps", interactive=True) with gr.Row(elem_classes="white_background"): optimizer_dropdown = gr.Dropdown(transformer_optimizer_list, label="Optimizer", info="", value=transformer_optimizer_list[1], interactive=True) lr_scheduler_list = ["linear","cosine","cosine_with_hard_restarts","polynomial_decay","constant","constant_with_warmup","inverse_sqrt","reduce_on_plateau"] lr_scheduler_type_dropdown = gr.Dropdown(lr_scheduler_list, label="LR Scheduler Type", info="", value=lr_scheduler_list[0], interactive=True) with gr.Row(elem_classes="white_background"): early_stopping_patience_slider = gr.Slider(0, 50+1, value=0, step=5, label="Early Stopping Patience", interactive=True) gradient_accumulation_steps_slider = gr.Slider(1, 50, value=1, step=1, label="Gradient Accumulation Steps") with gr.Row(elem_classes="white_background"): eval_steps_slider = gr.Slider(0, 1000, value=100, step=100, label="eval_steps", interactive=True) gradient_checkpointing_checkbox = gr.Checkbox(True,label="Gradient Checkpointing",interactive=True) train_btn = gr.Button("Start Training") with gr.Column(scale=1, min_width=1): with gr.Group(): gr.Markdown("## &nbsp;2.Test",elem_classes="white_background") training_runs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs') run_names = os.listdir(training_runs_dir) run_names.sort(key=lambda file:os.path.getmtime(os.path.join(training_runs_dir,file))) runs_output_model = [] for run_name in run_names: run_name_dir = os.path.join(training_runs_dir,run_name) run_output_model = os.path.join(run_name_dir,"output_model") if os.path.exists(run_output_model): run_output_model_names = os.listdir(run_output_model) for run_output_model_name in run_output_model_names: if run_output_model_name.find("merged_")>=0: runs_output_model.append(os.path.join(run_name,"output_model",run_output_model_name, "ori")) runs_output_model = runs_output_model[::-1] runs_output_model_dropdown = gr.Dropdown(runs_output_model, label="runs_output_model", value=runs_output_model[0] if runs_output_model else None, interactive=True) gr.Markdown("") gr.Markdown( "<span> &nbsp;&nbsp;&nbsp;&nbsp;**Note**:&nbsp;&nbsp;Llama2/Mistral Chat Template:<s\>[INST] instruction+input [/INST] output</s\> </span>", elem_classes="white_background") with gr.Row(): test_input_textbox = gr.Textbox(label="Input:", interactive=True, value="", lines=4, scale=4) generate_text_btn = gr.Button("Generate",scale=1) finetune_test_using_4bit_quantization_checkbox = gr.Checkbox(True, label="Using 4-bit quantization", interactive=True, visible=True, info="Less memory but slower", scale=1 ) # test_prompt = gr.Textbox(label="Prompt:", interactive=False, lines=2, scale=1) test_output = gr.Textbox(label="Output:", interactive=False,lines=4, scale=1) # def change_test_input_textbox(test_prefix1_textbox,test_input_textbox,test_prefix2_textbox): # return gr.update(value=test_prefix1_textbox+test_input_textbox+test_prefix2_textbox) # test_input_textbox.change(change_test_input_textbox,[test_prefix1_textbox,test_input_textbox,test_prefix2_textbox],test_prompt) with gr.Group(): gr.Markdown("## &nbsp;3.Quantization",elem_classes="white_background") with gr.Row(): quantization_type_list = ["gguf"] quantization_type_dropdown = 
gr.Dropdown(quantization_type_list, label="Quantization Type",value=quantization_type_list[0], interactive=True,scale=3) local_quantization_dataset_dropdown = gr.Dropdown(local_dataset_list, label="Dataset for quantization", value=local_dataset_list[0] if len( local_dataset_list) > 0 else None, interactive=True, elem_classes="white_background", scale=7, visible=False) refresh_local_quantization_dataset_btn = gr.Button("Refresh", scale=2, visible=False) def click_refresh_local_quantization_dataset_btn(): local_dataset_list, _ = get_local_dataset_list() return gr.update(choices=local_dataset_list, value=local_dataset_list[0] if len(local_dataset_list) > 0 else "") refresh_local_quantization_dataset_btn.click(click_refresh_local_quantization_dataset_btn,[],local_quantization_dataset_dropdown) with gr.Row(): training_runs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs') run_names = os.listdir(training_runs_dir) run_names.sort(key=lambda file: os.path.getmtime(os.path.join(training_runs_dir, file))) runs_output_model = [] for run_name in run_names: run_name_dir = os.path.join(training_runs_dir, run_name) run_output_model = os.path.join(run_name_dir, "output_model") if os.path.exists(run_output_model): run_output_model_names = os.listdir(run_output_model) for run_output_model_name in run_output_model_names: if run_output_model_name.find("merged_") >= 0: runs_output_model.append( os.path.join(run_name, "output_model", run_output_model_name, "ori")) runs_output_model = runs_output_model[::-1] quantization_runs_output_model_dropdown = gr.Dropdown(runs_output_model, label="runs_output_model", value=runs_output_model[ 0] if runs_output_model else None, interactive=True, scale=6) quantize_btn = gr.Button("Quantize", scale=1,visible=False) if runs_output_model: model_name = runs_output_model[0].split(os.sep)[-2].split('_')[-1] quantized_model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs', os.sep.join(runs_output_model[0].split(os.sep)[0:-1]), "quantized_" + quantization_type_list[0] + "_" + model_name) if not os.path.exists(quantized_model_dir): os.makedirs(quantized_model_dir) quantization_logging_markdown = gr.Markdown("") gguf_quantization_markdown0 = gr.Markdown("### &nbsp;&nbsp;&nbsp;&nbsp;GGUF Quantization Instruction:", elem_classes="white_background", visible=True) gguf_quantization_markdown1 = gr.Markdown('''&nbsp;&nbsp;&nbsp;&nbsp;1.Follow the instructions in the llama.cpp to generate a GGUF:[https://github.com/ggerganov/llama.cpp#prepare-data--run](https://github.com/ggerganov/llama.cpp#prepare-data--run),<span style="color:red">&nbsp;&nbsp;Q4_K_M is recommend</span>''',visible=True) if runs_output_model: gguf_quantization_markdown2 = gr.Markdown(f"&nbsp;&nbsp;&nbsp;&nbsp;2.Convert {runs_output_model[0]} to gguf model",visible=True) else: gguf_quantization_markdown2 = gr.Markdown( f"", visible=True) gguf_quantization_markdown3 = gr.Markdown(f"&nbsp;&nbsp;&nbsp;&nbsp;3.Deploy gguf model", visible=False) else: quantization_logging_markdown = gr.Markdown("") gguf_quantization_markdown0 = gr.Markdown("### &nbsp;&nbsp;&nbsp;&nbsp;GGUF Quantization Instruction:", elem_classes="white_background", visible=True) gguf_quantization_markdown1 = gr.Markdown('''''',visible=True) gguf_quantization_markdown2 = gr.Markdown(f"",visible=True) gguf_quantization_markdown3 = gr.Markdown(f"", visible=True) with gr.Group(visible=False): gr.Markdown("## &nbsp;4.Deploy",elem_classes="white_background") with gr.Row(): deployment_framework_dropdown = 
gr.Dropdown(["TGI","llama-cpp-python"], label="Deployment Framework",value="TGI", interactive=True) with gr.Row(): training_runs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs') run_names = os.listdir(training_runs_dir) run_names.sort(key=lambda file: os.path.getmtime(os.path.join(training_runs_dir, file))) # ori_model_runs_output_model = [] tgi_model_format_runs_output_model = [] gguf_model_format_runs_output_model = [] for run_name in run_names: run_name_dir = os.path.join(training_runs_dir, run_name) run_output_model = os.path.join(run_name_dir, "output_model") if os.path.exists(run_output_model): run_output_model_names = os.listdir(run_output_model) for run_output_model_name in run_output_model_names: model_bin_path = os.path.exists( os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs', run_name, "output_model", run_output_model_name, "ori", "pytorch_model.bin")) if run_output_model_name.find("merged_") >= 0 and model_bin_path: tgi_model_format_runs_output_model.append( os.path.join(run_name, "output_model", run_output_model_name, "ori")) gptq_model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs',run_name, "output_model", run_output_model_name, "quantized_gptq_"+run_output_model_name.split('_')[-1], "pytorch_model.bin") if os.path.exists(gptq_model_path): tgi_model_format_runs_output_model.append(os.path.join(run_name, "output_model", run_output_model_name, "quantized_gptq_"+run_output_model_name.split('_')[-1])) gguf_model_dir = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'runs', run_name, "output_model", run_output_model_name, "quantized_gguf_" + run_output_model_name.split('_')[-1]) if os.path.exists(gguf_model_dir): gguf_model_names = os.listdir(gguf_model_dir) for gguf_model_name in gguf_model_names: if gguf_model_name.split('.')[-1] == "gguf": gguf_model_format_runs_output_model.append( os.path.join(run_name, "output_model", run_output_model_name, "quantized_gguf_" + run_output_model_name.split('_')[-1], gguf_model_name)) tgi_model_format_runs_output_model = tgi_model_format_runs_output_model[::-1] gguf_model_format_runs_output_model = gguf_model_format_runs_output_model[::-1] deployment_runs_output_model_dropdown = gr.Dropdown(tgi_model_format_runs_output_model, label="runs_output_model", value=tgi_model_format_runs_output_model[ 0] if tgi_model_format_runs_output_model else None, interactive=True,scale=6) refresh_deployment_runs_output_model_btn = gr.Button("Refresh", scale=1, visible=True) if tgi_model_format_runs_output_model: model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs', os.path.dirname(tgi_model_format_runs_output_model[0])) model_name = os.path.basename(tgi_model_format_runs_output_model[0]) if model_name.rfind("quantized_gptq_") >= 0: run_server_value = f'''docker run --gpus all --shm-size 1g -p 8080:80 -v {model_dir}:/data ghcr.io/huggingface/text-generation-inference:latest --model-id /data/{model_name} --quantize gptq''' else: run_server_value = f'''docker run --gpus all --shm-size 1g -p 8080:80 -v {model_dir}:/data ghcr.io/huggingface/text-generation-inference:latest --model-id /data/{model_name}''' run_server_script_textbox = gr.Textbox(label="Run Server:", interactive=False,lines=2, scale=1,value=run_server_value) run_client_value = '''Command-Line Interface(CLI):\ncurl 127.0.0.1:8080/generate -X POST -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":20}}' -H 'Content-Type: application/json'\n\nPython:\nfrom huggingface_hub import InferenceClient 
\nclient = InferenceClient(model="http://127.0.0.1:8080")\noutput = client.text_generation(prompt="What is Deep Learning?",max_new_tokens=512) ''' run_client_script_textbox = gr.Textbox(label="Run Client:", interactive=False, lines=6,scale=1,value=run_client_value) else: run_server_script_textbox = gr.Textbox(label="Run Server:", interactive=False,lines=2, scale=1,value="") run_client_script_textbox = gr.Textbox(label="Run Client:", interactive=False, lines=6, scale=1, value="") # deploy_llm_code = gr.Code(code_str, language="shell", lines=5, label="Install Requirements:") install_requirements_value = ''' ### &nbsp;&nbsp; 1.install docker ### &nbsp;&nbsp; 2.Install NVIDIA Container Toolkit <h4> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2.1 Configure the repository: </h4> <p> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg \ && curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list | \ sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \ sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list \ && \ sudo apt-get update </p> <h4> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2.2 Install the NVIDIA Container Toolkit packages: </h4> <p>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; sudo apt-get install -y nvidia-container-toolkit </p> ''' with gr.Accordion("Install Requirements",open=False) as install_requirements_accordion: install_requirements_markdown = gr.Markdown(install_requirements_value) run_llama_cpp_python_code = gr.Code("", language="python", lines=10, label="run_model_using_llama_cpp_python.py",visible=False) # run_script_textbox = gr.Textbox(label="Install Requirements:", interactive=False, scale=1,value=install_requirements_value) #dependencies with gr.TabItem("Tensorboard", id=1) as fdddd: # training_log_markdown = gr.Markdown('',every=mytestfun) with gr.Row(): # training_log_textbox = gr.Textbox(label="logging:",value="", interactive=True, lines=2, scale=1) with gr.Group(): training_log_markdown = gr.Markdown('') stop_training_btn = gr.Button("Stop Training") training_runs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs') run_names = os.listdir(training_runs_dir) run_names = [run_name for run_name in run_names if os.path.isdir(os.path.join(training_runs_dir,run_name))] run_names.sort(key=lambda f: os.path.getmtime(os.path.join(training_runs_dir, f))) # print("dddddddd:",run_names) with gr.Group(): # with gr.Row(): training_runs_dropdown = gr.Dropdown(run_names, label="Training Runs",value=run_names[0] if run_names else None, interactive=True, scale=1) delete_text_btn = gr.Button("Delete Run", scale=1) iframe = f'<iframe src={TENSORBOARD_URL} style="border:none;height:1024px;width:100%">' tensorboard_html = gr.HTML(iframe) with gr.Tab("RAG"): with gr.Row(): with gr.Column(scale=4, min_width=1): with gr.Group(): gr.Markdown("## &nbsp;ChatBot", elem_classes="white_background") rag_data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'rag', 'data') matched_file_list = [] supported_doc_type = ["*.pdf","*.txt","*.docx"] for doc_type in supported_doc_type: matched_file_list += glob.glob(os.path.join(rag_data_dir, doc_type), recursive=False) matched_file_list.sort(key=lambda file: os.path.getmtime(file),reverse=True) matched_file_name_list = [] for matched_file in matched_file_list: 
matched_file_name_list.append(os.path.basename(matched_file)) # chat_data_source_radio_choices = ["Chat With Document", # f"Chat With Image"] gr.Markdown("### &nbsp;Chat With Document", elem_classes="white_background") # chat_data_source_radio = gr.Radio(chat_data_source_radio_choices, # label="", # value=chat_data_source_radio_choices[0], # interactive=True) with gr.Row(): rag_data_list_dropdown = gr.Dropdown(matched_file_name_list, label=f"Local Documents In {rag_data_dir}", value=matched_file_name_list[0] if matched_file_name_list else None, interactive=True,scale=4, min_width=1) refresh_rag_data_list_btn = gr.Button("Refresh", scale=1, min_width=1) # if not current_running_model_name: # model_running_status_markdown = gr.Markdown(f"<span style='color:red'>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;No modelis running!</span>") # else: # model_running_status_markdown = gr.Markdown(f"<span style='color:green'>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Model is runing:{current_running_model_name}.</span>") def click_refresh_rag_data_list_btn(): rag_data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'rag', 'data') matched_file_list = [] supported_doc_type = ["*.pdf", "*.txt", "*.docx"] for doc_type in supported_doc_type: matched_file_list += glob.glob(os.path.join(rag_data_dir, doc_type), recursive=False) matched_file_list.sort(key=lambda file: os.path.getmtime(file), reverse=True) matched_file_name_list = [] for matched_file in matched_file_list: matched_file_name_list.append(os.path.basename(matched_file)) return gr.update(choices=matched_file_name_list,value=matched_file_name_list[0] if matched_file_name_list else None) refresh_rag_data_list_btn.click(click_refresh_rag_data_list_btn,[],rag_data_list_dropdown) # def update_model_running_status(): # return gr.update(value=f"<span style='color:red'>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;{current_running_model_name} is runing!.</span>") # # load_model_btn.click(click_load_model_btn,model_list_dropdown,[model_list_dropdown]).success(update_model_running_status,[],model_running_status_markdown) with gr.Row(): rag_chatbot = gr.Chatbot(value=[],bubble_full_width=False,rtl=False,layout="panel",height=chatbot_height, avatar_images=((os.path.join(os.path.abspath(''),"pics", "user1.png")), (os.path.join(os.path.abspath(''),"pics", "bot4.png"))), ) with gr.Row(): rag_input_txtbox = gr.Textbox( show_label=False,autofocus=True, placeholder="Enter text and press enter",scale=6) rag_generate_btn = gr.Button("Generate", scale=1) rag_stop_btn = gr.Button("Stop", scale=1) # rag_clear_btn = gr.Button("Clear", scale=1) rag_model_running_status_markdown = gr.Markdown( f"### &nbsp;&nbsp;Retrieved Document Chunks",visible=True) # retrieved_document_chunks_markdown = gr.Markdown( # f"### &nbsp;&nbsp;Retrieved Document Chunks",visible=True) retrieved_document_chunks_dataframe = gr.Dataframe( headers=["ID", "Chunk"], datatype=["str", "str"], show_label=False, value=None ) with gr.Column(scale=4, min_width=1): with gr.Group(): gr.Markdown("## &nbsp;Setting", elem_classes="white_background") with gr.Group(): with gr.Group(): gr.Markdown("### &nbsp;&nbsp;1.Chunking", elem_classes="white_background") with gr.Row(): text_splitter_dropdown = gr.Dropdown(["RecursiveCharacterTextSplitter"], label=f"Text Splitter", value="RecursiveCharacterTextSplitter", interactive=True, scale=1, min_width=1) with gr.Row(): chunk_size_slider = gr.Slider(32, 1024, value=256, step=32, label="Chunk Size", interactive=True, scale=1) chunk_overlap_slider = gr.Slider(0, 500, value=20, step=10, 
label="Chunk Overlap", interactive=True) Separators_textbox = gr.Textbox(label="Separators", value='''["\n\n", "\n", ".", " ", ""]''', interactive=True,visible=False) with gr.Group(): gr.Markdown("### &nbsp;&nbsp;2.Vector Store Retriever", elem_classes="white_background") # local_embedding_model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),"rag","embedding_models") local_embedding_model_names = get_hg_model_names_from_dir(local_embedding_model_dir,"embedding_models") embedding_model_source_radio_choices = ["Download From Huggingface Hub", f"From Local Dir(hg format:{local_embedding_model_dir})"] embedding_model_source_radio = gr.Radio(embedding_model_source_radio_choices, label="Embedding Model Source", value=embedding_model_source_radio_choices[0], interactive=True) with gr.Row(): hub_embedding_model_names_dropdown = gr.Dropdown(embedding_model_names, label=f"",show_label=False, value=embedding_model_names[0] if embedding_model_names else None, interactive=True, scale=4, min_width=1) download_hub_embedding_model_names_btn = gr.Button("Download", scale=1) stop_download_hub_embedding_model_names_btn = gr.Button("Stop", scale=1, visible=False) local_embedding_model_names_dropdown = gr.Dropdown(local_embedding_model_names, label=f"Embedding Model",show_label=False, value=local_embedding_model_names[0] if local_embedding_model_names else None, interactive=True, scale=4, min_width=1,visible=False) refresh_local_embedding_model_names_btn = gr.Button("Refresh", scale=1,visible=False) # model_config_path1 = os.path.join(local_embedding_model_dir, # embedding_model_names[0], "pytorch_model.bin") # model_config_path2 = os.path.join(local_embedding_model_dir, # embedding_model_names[0], "model.safetensors") model_config_path = os.path.join(local_embedding_model_dir, embedding_model_names[0], "config.json") if os.path.exists(model_config_path): download_hub_embedding_model_status_markdown = gr.Markdown( '<span style="color:green">&nbsp;&nbsp;&nbsp;&nbsp;This model has already been downloaded to local.</span>') else: download_hub_embedding_model_status_markdown = gr.Markdown( '<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;This model has not been downloaded.</span>') with gr.Row(): search_top_k_slider = gr.Slider(1, 10, value=3, step=1, label="Search Top K", interactive=True) search_score_threshold_slider = gr.Slider(0, 1, value=0.5, step=0.1, label="Search Score Threshold",interactive=True) with gr.Group(): gr.Markdown("### &nbsp;&nbsp;3.Chat Model", elem_classes="white_background") local_chat_model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),"models") runs_model_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "runs") # local_chat_model_names = get_hg_model_names_from_dir(local_chat_model_dir) local_chat_model_names = get_hg_model_names_and_gguf_from_dir(local_chat_model_dir,runs_model_root_dir) chat_model_source_radio_choices = ["Download From Huggingface Hub", f"From Local Dir(hg format:{local_chat_model_dir})"] chat_model_source_radio = gr.Radio(chat_model_source_radio_choices, label="Chat Model source",show_label=False, value=chat_model_source_radio_choices[0], interactive=True) with gr.Row(): hub_chat_model_names_dropdown = gr.Dropdown(base_model_names, label=f"Chat Model",show_label=False,allow_custom_value=True, value=base_model_names[0] if base_model_names else None, interactive=True, scale=4, min_width=1) download_hub_chat_model_names_btn = gr.Button("Download", scale=1) stop_download_hub_chat_model_names_btn = gr.Button("Stop", scale=1, 
visible=False) local_chat_model_names_dropdown = gr.Dropdown(local_chat_model_names, label=f"Chat Model",show_label=False, value=local_chat_model_names[0] if local_chat_model_names else None, interactive=True, scale=4, min_width=1,visible=False) refresh_local_chat_model_names_btn = gr.Button("Refresh", scale=1,visible=False) rag_using_4bit_quantization_checkbox = gr.Checkbox(True, label="Using 4-bit quantization", interactive=True, visible=True, info="Less memory but slower", scale=1 ) if validate_model_path(base_model_names[0])[0]: download_hub_chat_model_status_markdown = gr.Markdown( '<span style="color:green">&nbsp;&nbsp;&nbsp;&nbsp;This model has already been downloaded to local.</span>') else: download_hub_chat_model_status_markdown = gr.Markdown( '<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;This model has not been downloaded.</span>') with gr.Tab("Setting"): # with gr.Column(scale=4, min_width=1): with gr.Group(): gr.Markdown("## &nbsp;Setting", elem_classes="white_background") with gr.Group(): with gr.Row(): max_new_tokens_slider = gr.Slider(1, 4096, value=256, step=0.1, label="Max New Tokens", interactive=True) temperature_slider = gr.Slider(0, 5, value=1, step=0.1, label="Temperature", interactive=True) with gr.Row(): top_k_slider = gr.Slider(1, 100, value=50, step=1, label="Top_k", interactive=True) top_p_slider = gr.Slider(0, 1, value=1, step=0.1, label="Top_p", interactive=True) with gr.Row(): repeat_penalty_slider = gr.Slider(1, 5, value=1, step=0.1, label="Repeat Penalty", interactive=True) with gr.Row(): chat_history_window_slider = gr.Slider(1, 20, value=3, step=1, label="Chat History Window", interactive=True) low_cpu_mem_usage_checkbox = gr.Checkbox(False, label="Low Cpu Mem Usage",interactive=True,visible=False) Huggingface_hub_token = gr.Textbox(label="Huggingface Hub Token", value="") def check_local_model_or_dataset_is_empty1(base_model_name_dropdown,Huggingface_hub_token): if len(base_model_name_dropdown.strip()) == 0: raise gr.Error("Name is empty!") try:
login_huggingface(Huggingface_hub_token,base_model_name_dropdown)
0
2023-11-25 12:37:21+00:00
24k
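The status markdowns in the UI code above key off the presence of config.json to decide whether a Hugging Face model already exists on disk. A minimal, self-contained sketch of that check; the directory and model name below are hypothetical stand-ins, not values from the record:

import os

def is_model_downloaded(local_model_dir: str, model_name: str) -> bool:
    # The UI treats an existing config.json under the model's folder as
    # evidence that the hub download already completed.
    return os.path.exists(os.path.join(local_model_dir, model_name, "config.json"))

# hypothetical usage:
print(is_model_downloaded("rag/embedding_models", "bge-base-en"))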
danilonumeroso/conar
models/tsp_reasoner.py
[ { "identifier": "vmapped_beam_search_rollout", "path": "baselines/beam_search.py", "snippet": "BEAM_WIDTH = 128\ndef expand_single(beam_vis, beam_last, beam_cost, beam_par, W):\ndef beam_search_rollout_step(W, beam_width, i, tpl):\ndef beam_search_rollout(start_route, W, num_nodes, beam_width):\ndef bea...
from collections import defaultdict
from pprint import pprint
from torch_geometric.loader import DataLoader
from pytorch_lightning.trainer.supporters import CombinedLoader
from baselines.beam_search import vmapped_beam_search_rollout, BEAM_WIDTH
from models.algorithm_reasoner import AlgorithmReasoner, LitAlgorithmReasoner
from hyperparameters import get_hyperparameters
from torch_geometric.utils import k_hop_subgraph
from datasets._configs import CONFIGS
from utils_execution import cross_entropy, check_edge_index_sorted, prepare_constants, edge_one_hot_encode_pointers, get_number_of_nodes
from clrs import Type, Location, Stage
import copy
import itertools
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch_scatter
import torch_geometric
import pytorch_lightning as pl
15,275
                u1, v1 = batch.edge_index[:, i]
                u2, v2 = edges[:, j]
                if u1 == u2 and v1 == v2:
                    mask[i] = 1
                    j += 1
                if j == edges.shape[1]:
                    break
            assert j == edges.shape[1]
            return mask

        def get_mask_v2(edges):
            dense_edges = torch_geometric.utils.to_dense_adj(edges, batch=batch.batch).bool()
            dense_edges_batch = torch_geometric.utils.to_dense_adj(batch.edge_index, batch=batch.batch).bool()
            edge_index, mask = torch_geometric.utils.dense_to_sparse(((dense_edges & dense_edges_batch).float()+1))
            mask = mask - 1
            return mask

        acc = None
        # st = time.time()
        outputs = type(self.algorithm_module).convert_logits_to_outputs(
            self.dataset.spec, output_logits,
            batch.edge_index[0],
            batch.edge_index[1],
            batch.num_nodes,
            batch.batch,
            include_probabilities=False)['output']
        for name in outputs:
            pred = outputs[name]
            pred_gt = getattr(batch, name)
            stage, loc, data_type = self.dataset.spec[name]
            if loc == Location.NODE:
                if name == 'predecessor_index':
                    tours = torch.stack([torch.arange(pred.shape[0]).to(pred), pred])
                    mask = get_mask_v2(tours).bool()
                    st = time.time()
                    mattr = batch.edge_attr[mask]
                    mbatch = batch.edge_index_batch[mask]
                    msrc, mdst = batch.edge_index[:, mask]
                    tour_len = torch_scatter.scatter_sum(mattr, mbatch)
                    tour_correctness = torch_scatter.scatter_sum((msrc == mdst.sort().values), mbatch)
                    assert sum(tour_correctness)/len(tour_correctness) == 1
        return dict(tour_len=tour_len.mean(),
                    tour_len_gt=batch.optimal_value.mean().item(),
                    tour_correctness=sum(tour_correctness)/len(tour_correctness),
                    tour_relative_error=((tour_len-batch.optimal_value)/batch.optimal_value).mean())

    def process_TSP_tour_greedy(self, batch, output_logits):
        mask_active_nodes = torch.tensor(batch.start_route).bool()
        mask_edges_to_nodes_in_tour = torch.zeros_like(batch.edge_index[0]).bool()
        max_nodes_per_graph = batch.batch.unique(return_counts=True)[1].max()
        num_nodes_per_graph = batch.num_nodes // batch.num_graphs
        for _ in range(max_nodes_per_graph - 1):
            mask_active_edges = mask_active_nodes[batch.edge_index[0]] & ~mask_edges_to_nodes_in_tour  # Any edge outwards of active nodes and not pointing to previously used node
            mask_edges_to_nodes_in_tour |= mask_active_nodes[batch.edge_index[1]]  # any edge towards the active nodes should not be used in future iterations
            sloops = (batch.edge_index[0] == batch.edge_index[1])
            preds = output_logits['output']['predecessor_index'].clone()
            preds = preds.masked_fill(~mask_active_edges | sloops, -1e6)
            # nudge the max value to ensure there is a unique maximum
            max_idxs = preds.reshape(-1, num_nodes_per_graph).argmax(-1)
            max_idxs = F.one_hot(max_idxs, num_nodes_per_graph)
            preds[max_idxs.bool().flatten()] = (preds.reshape(-1, num_nodes_per_graph)[max_idxs.bool()] + 1e-4).flatten()
            output_logits['output']['predecessor_index'][mask_active_nodes[batch.edge_index[0]]] = preds[mask_active_nodes[batch.edge_index[0]]]
            new_active_nodes = preds.reshape(-1, num_nodes_per_graph).argmax(-1)[mask_active_nodes.bool()].unsqueeze(-1)  # NOTE the reshape/flatten mechanic may not work if graphs in the same batch are of different sizes (consider using torch_scatter.scatter_max)
            mask_active_nodes = F.one_hot(new_active_nodes, num_nodes_per_graph).flatten().bool()
        final_pred_mask = mask_active_nodes[batch.edge_index[0]] & batch.start_route.bool()[batch.edge_index[1]]
        output_logits['output']['predecessor_index'] = output_logits['output']['predecessor_index'].masked_fill(final_pred_mask, 1e8)
        return output_logits

    def process_TSP_tour_BS(self, batch, output_logits):
        start_route = torch_geometric.utils.to_dense_batch(batch.start_route, batch=batch.batch)[0]
        dens_logits = torch_geometric.utils.to_dense_adj(batch.edge_index, batch=batch.batch,
                                                         edge_attr=output_logits['output']['predecessor_index'])
        num_nodes = start_route.shape[1]
        # st = time.time()
        tours = torch.tensor(np.array(vmapped_beam_search_rollout(
            start_route.cpu().detach().numpy(), -dens_logits.cpu().detach().numpy(),
            num_nodes, BEAM_WIDTH)), device=start_route.device)
        # print('tours took', time.time()-st)
        # st = time.time()
        dens_logits_o = torch.full_like(dens_logits, -1e9)
        arranged = torch.arange(dens_logits_o.shape[0], device=dens_logits.device)
        fr = tours[arranged, 0]
        to = tours[arranged, 1]
        batch_id = arranged.unsqueeze(1).expand_as(fr)
        fr = fr.flatten()
        to = to.flatten()
        batch_id = batch_id.flatten()
        dens_logits_o[batch_id, fr, to] = 1e9
        edge_index, sparse_logits = torch_geometric.utils.dense_to_sparse(dens_logits_o)
        sparse_logits = sparse_logits.to(batch.edge_index.device)
        assert (edge_index == batch.edge_index).all()
        output_logits['output']['predecessor_index'] = sparse_logits
        # print('rest took', time.time()-st)
        return output_logits

    def process_TSP_tour(self, batch, output_logits):
        if self.ensure_permutation == "greedy":
            return self.process_TSP_tour_greedy(batch, output_logits)
        return self.process_TSP_tour_BS(batch, output_logits)

    def get_metrics(self, batch, all_hint_logits, output_logits, all_masks_graph):
        output_logits = self.process_TSP_tour(batch, output_logits)
        accs_dict = super().get_metrics(batch, all_hint_logits, output_logits, all_masks_graph)
        accs_dict.update(**self.get_tour_metrics(output_logits, batch))
        return accs_dict

    def load_dataset(self, split, suffix=''):
        split = split+suffix
class TSPReasoner(AlgorithmReasoner):
    def __init__(self, spec, data, latent_features, algo_processor,
                 bias=True, use_TF=False, L1_loss=False,
                 global_termination_pool='max',  # 'predinet',
                 get_attention=False, use_batch_norm=False, transferring=False,
                 timeit=True, double_process=False,
                 **algo_reasoner_kwargs):
        super().__init__(
            spec, data, latent_features, algo_processor,
            use_TF=use_TF,
            timeit=timeit,
            L1_loss=L1_loss,
            global_termination_pool=global_termination_pool,
            get_attention=get_attention,
            use_batch_norm=use_batch_norm,
            transferring=transferring,
            **algo_reasoner_kwargs,
        )
        self.step_idx = 0
        self.assert_checks = False
        self.debug = False
        self.debug_epoch_threshold = 1e9
        self.next_step_pool = True
        self.double_process = double_process
        self.lambda_mul = 1  # 0.0001
        self.transferring = transferring

    def get_input_output_hints(self, batch):
        hint_inp_curr = dict()
        hint_out_curr = dict()
        return hint_inp_curr, hint_out_curr

    def process(self, *args, **kwargs):
        self.all_hint_logits, self.last_logits, self.all_masks_graph = super().process(
            *args,
            first_n_processors=1000 if not self.double_process else 1,
            **kwargs)
        if self.double_process:
            self.all_hint_logits, self.last_logits, self.all_masks_graph = super().process(
                *args,
                init_last_latent=self.last_latent,
                **kwargs)
        return self.all_hint_logits, self.last_logits, self.all_masks_graph


class LitTSPReasoner(LitAlgorithmReasoner):

    def __init__(self, hidden_dim, algo_processor, dataset_class, dataset_root, dataset_kwargs,
                 bias=True, use_TF=False, ensure_permutation='greedy', transferring=False,
                 learning_rate=get_hyperparameters()['lr'], double_process=False,
                 **algo_reasoner_kwargs):
        super().__init__(hidden_dim, algo_processor, dataset_class, dataset_root, dataset_kwargs,
                         bias=bias, use_TF=use_TF, transferring=transferring,
                         learning_rate=learning_rate, **algo_reasoner_kwargs)
        self.algorithm_module = TSPReasoner(self.dataset.spec,
                                            self.dataset[0],
                                            hidden_dim,
                                            algo_processor,
                                            bias=bias,
                                            use_TF=use_TF,
                                            transferring=transferring,
                                            timeit=self.timeit,
                                            double_process=double_process,
                                            **algo_reasoner_kwargs)
        self.ensure_permutation = ensure_permutation
        self.double_process = double_process
        self.save_hyperparameters(ignore=['algo_processor'])

    def training_step(self, batch, batch_idx):
        ret = {'loss': 0, 'losses_dict': defaultdict(list), 'accuracies': defaultdict(list)}
        for bb in batch:
            ans = super().training_step(bb, batch_idx)
            ret['loss'] += ans['loss']
            for name in ['losses_dict', 'accuracies']:
                for k, v in ans[name].items():
                    ret[name][k].append(v)
        ret['loss'] /= len(batch)
        for name in ['losses_dict', 'accuracies']:
            for k, v in ans[name].items():
                ret[name][k] = torch.tensor(v).mean()
        return ret

    def get_tour_metrics(self, output_logits, batch):

        def get_mask(edges):
            mask = torch.zeros_like(batch.edge_index[0])
            j = 0
            for i in range(batch.edge_index.shape[1]):
                u1, v1 = batch.edge_index[:, i]
                u2, v2 = edges[:, j]
                if u1 == u2 and v1 == v2:
                    mask[i] = 1
                    j += 1
                if j == edges.shape[1]:
                    break
            assert j == edges.shape[1]
            return mask

        def get_mask_v2(edges):
            dense_edges = torch_geometric.utils.to_dense_adj(edges, batch=batch.batch).bool()
            dense_edges_batch = torch_geometric.utils.to_dense_adj(batch.edge_index, batch=batch.batch).bool()
            edge_index, mask = torch_geometric.utils.dense_to_sparse(((dense_edges & dense_edges_batch).float()+1))
            mask = mask - 1
            return mask

        acc = None
        # st = time.time()
        outputs = type(self.algorithm_module).convert_logits_to_outputs(
            self.dataset.spec, output_logits,
            batch.edge_index[0],
            batch.edge_index[1],
            batch.num_nodes,
            batch.batch,
            include_probabilities=False)['output']
        for name in outputs:
            pred = outputs[name]
            pred_gt = getattr(batch, name)
            stage, loc, data_type = self.dataset.spec[name]
            if loc == Location.NODE:
                if name == 'predecessor_index':
                    tours = torch.stack([torch.arange(pred.shape[0]).to(pred), pred])
                    mask = get_mask_v2(tours).bool()
                    st = time.time()
                    mattr = batch.edge_attr[mask]
                    mbatch = batch.edge_index_batch[mask]
                    msrc, mdst = batch.edge_index[:, mask]
                    tour_len = torch_scatter.scatter_sum(mattr, mbatch)
                    tour_correctness = torch_scatter.scatter_sum((msrc == mdst.sort().values), mbatch)
                    assert sum(tour_correctness)/len(tour_correctness) == 1
        return dict(tour_len=tour_len.mean(),
                    tour_len_gt=batch.optimal_value.mean().item(),
                    tour_correctness=sum(tour_correctness)/len(tour_correctness),
                    tour_relative_error=((tour_len-batch.optimal_value)/batch.optimal_value).mean())

    def process_TSP_tour_greedy(self, batch, output_logits):
        mask_active_nodes = torch.tensor(batch.start_route).bool()
        mask_edges_to_nodes_in_tour = torch.zeros_like(batch.edge_index[0]).bool()
        max_nodes_per_graph = batch.batch.unique(return_counts=True)[1].max()
        num_nodes_per_graph = batch.num_nodes // batch.num_graphs
        for _ in range(max_nodes_per_graph - 1):
            mask_active_edges = mask_active_nodes[batch.edge_index[0]] & ~mask_edges_to_nodes_in_tour  # Any edge outwards of active nodes and not pointing to previously used node
            mask_edges_to_nodes_in_tour |= mask_active_nodes[batch.edge_index[1]]  # any edge towards the active nodes should not be used in future iterations
            sloops = (batch.edge_index[0] == batch.edge_index[1])
            preds = output_logits['output']['predecessor_index'].clone()
            preds = preds.masked_fill(~mask_active_edges | sloops, -1e6)
            # nudge the max value to ensure there is a unique maximum
            max_idxs = preds.reshape(-1, num_nodes_per_graph).argmax(-1)
            max_idxs = F.one_hot(max_idxs, num_nodes_per_graph)
            preds[max_idxs.bool().flatten()] = (preds.reshape(-1, num_nodes_per_graph)[max_idxs.bool()] + 1e-4).flatten()
            output_logits['output']['predecessor_index'][mask_active_nodes[batch.edge_index[0]]] = preds[mask_active_nodes[batch.edge_index[0]]]
            new_active_nodes = preds.reshape(-1, num_nodes_per_graph).argmax(-1)[mask_active_nodes.bool()].unsqueeze(-1)  # NOTE the reshape/flatten mechanic may not work if graphs in the same batch are of different sizes (consider using torch_scatter.scatter_max)
            mask_active_nodes = F.one_hot(new_active_nodes, num_nodes_per_graph).flatten().bool()
        final_pred_mask = mask_active_nodes[batch.edge_index[0]] & batch.start_route.bool()[batch.edge_index[1]]
        output_logits['output']['predecessor_index'] = output_logits['output']['predecessor_index'].masked_fill(final_pred_mask, 1e8)
        return output_logits

    def process_TSP_tour_BS(self, batch, output_logits):
        start_route = torch_geometric.utils.to_dense_batch(batch.start_route, batch=batch.batch)[0]
        dens_logits = torch_geometric.utils.to_dense_adj(batch.edge_index, batch=batch.batch,
                                                         edge_attr=output_logits['output']['predecessor_index'])
        num_nodes = start_route.shape[1]
        # st = time.time()
        tours = torch.tensor(np.array(vmapped_beam_search_rollout(
            start_route.cpu().detach().numpy(), -dens_logits.cpu().detach().numpy(),
            num_nodes, BEAM_WIDTH)), device=start_route.device)
        # print('tours took', time.time()-st)
        # st = time.time()
        dens_logits_o = torch.full_like(dens_logits, -1e9)
        arranged = torch.arange(dens_logits_o.shape[0], device=dens_logits.device)
        fr = tours[arranged, 0]
        to = tours[arranged, 1]
        batch_id = arranged.unsqueeze(1).expand_as(fr)
        fr = fr.flatten()
        to = to.flatten()
        batch_id = batch_id.flatten()
        dens_logits_o[batch_id, fr, to] = 1e9
        edge_index, sparse_logits = torch_geometric.utils.dense_to_sparse(dens_logits_o)
        sparse_logits = sparse_logits.to(batch.edge_index.device)
        assert (edge_index == batch.edge_index).all()
        output_logits['output']['predecessor_index'] = sparse_logits
        # print('rest took', time.time()-st)
        return output_logits

    def process_TSP_tour(self, batch, output_logits):
        if self.ensure_permutation == "greedy":
            return self.process_TSP_tour_greedy(batch, output_logits)
        return self.process_TSP_tour_BS(batch, output_logits)

    def get_metrics(self, batch, all_hint_logits, output_logits, all_masks_graph):
        output_logits = self.process_TSP_tour(batch, output_logits)
        accs_dict = super().get_metrics(batch, all_hint_logits, output_logits, all_masks_graph)
        accs_dict.update(**self.get_tour_metrics(output_logits, batch))
        return accs_dict

    def load_dataset(self, split, suffix=''):
        split = split+suffix
nns = get_number_of_nodes(self.algorithm, split)
9
2023-11-20 15:32:43+00:00
24k
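A note on the get_mask_v2 trick in the record above: torch_geometric's dense_to_sparse drops zero entries, so the dense tour mask is shifted by +1 before sparsifying (every node pair then survives, which keeps the resulting edge_index aligned with the complete-graph batch.edge_index used for TSP) and shifted back afterwards. A minimal sketch under that assumption, with a hypothetical 3-node tour:

import torch
from torch_geometric.utils import dense_to_sparse

# hypothetical 3-node tour 0 -> 1 -> 2 -> 0 as a dense adjacency
tour = torch.tensor([[0., 1., 0.],
                     [0., 0., 1.],
                     [1., 0., 0.]])

# +1 keeps zero-valued pairs alive through dense_to_sparse ...
edge_index, vals = dense_to_sparse(tour + 1.)
# ... and -1 restores a 0/1 mask over all 9 candidate edges
mask = (vals - 1.).bool()

assert edge_index.shape[1] == 9 and int(mask.sum()) == 3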
bearyi26/DCPT
lib/train/base_functions.py
[ { "identifier": "Lasot", "path": "lib/train/dataset/lasot.py", "snippet": "class Lasot(BaseVideoDataset):\n \"\"\" LaSOT dataset.\n\n Publication:\n LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking\n Heng Fan, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, H...
import torch
import lib.train.data.transforms as tfm
from torch.utils.data.distributed import DistributedSampler
from lib.train.dataset import Lasot, Got10k, MSCOCOSeq, ImagenetVID, TrackingNet, BDD100K_Night, SHIFT_Night, ExDark
from lib.train.dataset import Lasot_lmdb, Got10k_lmdb, MSCOCOSeq_lmdb, ImagenetVID_lmdb, TrackingNet_lmdb
from lib.train.data import sampler, opencv_loader, processing, LTRLoader
from lib.utils.misc import is_main_process
21,081
# datasets related
def update_settings(settings, cfg):
    settings.print_interval = cfg.TRAIN.PRINT_INTERVAL
    settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR,
                                   'search': cfg.DATA.SEARCH.FACTOR}
    settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE,
                          'search': cfg.DATA.SEARCH.SIZE}
    settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER,
                                     'search': cfg.DATA.SEARCH.CENTER_JITTER}
    settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER,
                                    'search': cfg.DATA.SEARCH.SCALE_JITTER}
    settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM
    settings.print_stats = None
    settings.batchsize = cfg.TRAIN.BATCH_SIZE
    settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE


def names2datasets(name_list: list, settings, image_loader):
    assert isinstance(name_list, list)
    datasets = []
    for name in name_list:
        assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full", "GOT10K_official_val",
                        "COCO17", "VID", "TRACKINGNET", "BDD100K_NIGHT", "SHIFT_NIGHT", "ExDark"]
        if name == "LASOT":
            if settings.use_lmdb:
                print("Building lasot dataset from lmdb")
                datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader))
            else:
                datasets.append(Lasot(settings.env.lasot_dir, split='train', image_loader=image_loader))
        if name == "GOT10K_vottrain":
            if settings.use_lmdb:
                print("Building got10k from lmdb")
                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='vottrain', image_loader=image_loader))
            else:
# datasets related
def update_settings(settings, cfg):
    settings.print_interval = cfg.TRAIN.PRINT_INTERVAL
    settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR,
                                   'search': cfg.DATA.SEARCH.FACTOR}
    settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE,
                          'search': cfg.DATA.SEARCH.SIZE}
    settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER,
                                     'search': cfg.DATA.SEARCH.CENTER_JITTER}
    settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER,
                                    'search': cfg.DATA.SEARCH.SCALE_JITTER}
    settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM
    settings.print_stats = None
    settings.batchsize = cfg.TRAIN.BATCH_SIZE
    settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE


def names2datasets(name_list: list, settings, image_loader):
    assert isinstance(name_list, list)
    datasets = []
    for name in name_list:
        assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full", "GOT10K_official_val",
                        "COCO17", "VID", "TRACKINGNET", "BDD100K_NIGHT", "SHIFT_NIGHT", "ExDark"]
        if name == "LASOT":
            if settings.use_lmdb:
                print("Building lasot dataset from lmdb")
                datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader))
            else:
                datasets.append(Lasot(settings.env.lasot_dir, split='train', image_loader=image_loader))
        if name == "GOT10K_vottrain":
            if settings.use_lmdb:
                print("Building got10k from lmdb")
                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='vottrain', image_loader=image_loader))
            else:
datasets.append(Got10k(settings.env.got10k_dir, split='vottrain', image_loader=image_loader))
1
2023-11-20 06:41:15+00:00
24k
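The names2datasets if-chain in the record above maps dataset-name strings to constructor calls. The same dispatch is often written as a dict registry; a condensed sketch of that design choice, where the factories are hypothetical placeholders rather than the repo's classes:

from typing import Callable, Dict, List

def build_datasets(names: List[str], registry: Dict[str, Callable[[], object]]) -> List[object]:
    datasets = []
    for name in names:
        assert name in registry, f"unknown dataset: {name}"
        datasets.append(registry[name]())  # one constructed dataset per requested name
    return datasets

# hypothetical usage mirroring the assert-list above:
registry = {
    "LASOT": lambda: ("Lasot", "train"),
    "GOT10K_vottrain": lambda: ("Got10k", "vottrain"),
}
print(build_datasets(["LASOT", "GOT10K_vottrain"], registry))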
shercoo/RGDiffSR
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n ...
import datetime
import math
import cv2
import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import pygame
from collections import OrderedDict
from matplotlib import pyplot as plt
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager
from functools import partial
from torchvision import transforms
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler
from text_super_resolution.model.VisionLAN.utils import Attention_AR_counter
from text_super_resolution.model.tps_spatial_transformer import TPSSpatialTransformer
from text_super_resolution.model.stn_head import STNHead
from text_super_resolution.model.VisionLAN.VisionLAN import VisionLAN
from utils.render_standard_text import *
from text_super_resolution.loss.semantic_loss import SemanticLoss
from text_super_resolution.utils import ssim_psnr
from pygame import freetype
from utils.metrics import *
14,521
        return img, intermediates

    @torch.no_grad()
    def p_sample_loop(self, cond, shape, return_intermediates=False,
                      x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,
                      mask=None, x0=None, img_callback=None, start_T=None,
                      log_every_t=None):
        if not log_every_t:
            log_every_t = self.log_every_t
        device = self.betas.device
        b = shape[0]
        if x_T is None:
            img = torch.randn(shape, device=device)
        else:
            img = x_T
        intermediates = [img]
        if timesteps is None:
            timesteps = self.num_timesteps
        if start_T is not None:
            timesteps = min(timesteps, start_T)
        iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(
            range(0, timesteps))
        if mask is not None:
            assert x0 is not None
            assert x0.shape[2:3] == mask.shape[2:3]  # spatial size has to match
        for i in iterator:
            ts = torch.full((b,), i, device=device, dtype=torch.long)
            if self.shorten_cond_schedule:
                assert self.model.conditioning_key != 'hybrid'
                tc = self.cond_ids[ts].to(cond.device)
                cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
            img = self.p_sample(img, cond, ts,
                                clip_denoised=self.clip_denoised,
                                quantize_denoised=quantize_denoised)
            if mask is not None:
                img_orig = self.q_sample(x0, ts)
                img = img_orig * mask + (1. - mask) * img
            if i % log_every_t == 0 or i == timesteps - 1:
                intermediates.append(img)
            if callback:
                callback(i)
            if img_callback:
                img_callback(img, i)
        if return_intermediates:
            return img, intermediates
        return img

    @torch.no_grad()
    def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,
               verbose=True, timesteps=None, quantize_denoised=False,
               mask=None, x0=None, shape=None, **kwargs):
        if shape is None:
            shape = (batch_size, self.channels, self.image_size, self.image_size)
        if cond is not None:
            if isinstance(cond, dict):
                cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
                        list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
            else:
                cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
        return self.p_sample_loop(cond,
                                  shape,
                                  return_intermediates=return_intermediates, x_T=x_T,
                                  verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,
                                  mask=mask, x0=x0)

    @torch.no_grad()
    def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):
        if ddim:
            ddim_sampler = DDIMSampler(self)
            # print(cond.shape)
            if self.text_prior_enable:
                if isinstance(cond, dict):
                    shape = (self.channels, cond['c_concat'][0].shape[2], cond['c_concat'][0].shape[3])
                elif isinstance(cond, list):
                    shape = (self.channels, cond[0].shape[2], cond[0].shape[3])
                else:
                    shape = (self.channels, cond.shape[2], cond.shape[3])
            else:
                shape = (self.channels, cond.shape[2], cond.shape[3])
            # shape = (self.channels, self.image_size, self.image_size)
            samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs)
        else:
            samples, intermediates = self.sample(cond=cond, batch_size=batch_size,
                                                 return_intermediates=True, **kwargs)
        return samples, intermediates

    @torch.no_grad()
    def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
                   quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
                   plot_diffusion_rows=True, **kwargs):
        use_ddim = ddim_steps is not None

        log = dict()
        z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
                                           return_first_stage_outputs=True,
                                           force_c_encode=True,
                                           return_original_cond=True,
                                           bs=N)
        # print('**********************c shape',c.shape)
        N = min(x.shape[0], N)
        n_row = min(x.shape[0], n_row)
        log["inputs"] = x
        log["reconstruction"] = xrec
        if self.model.conditioning_key is not None:
            if hasattr(self.cond_stage_model, "decode"):
                xc = self.cond_stage_model.decode(c)
                log["conditioning"] = xc
            elif self.cond_stage_key in ["caption"]:
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} sem_loss = SemanticLoss() def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., ): super().__init__() assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. 
- betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") print(sd.keys()) print(sd['epoch']) print(sd['global_step']) print(sd['callbacks']) # print(sd['optimizer_states']) # print(sd['lr_schedulers']) # print(sd['state_dict'].keys()) # exit(0) if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): # print('************************fuck',k) x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict 
= self(x) return loss, loss_dict def training_step(self, batch, batch_idx): loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): # print('******************************in validation') _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, text_prior_enable=False, image_height=32, image_width=128, STN_enable=False, standard_text=False, VL_pretrained_path=None, fid_eval=False, visualize=False, down_sample_rate=2, recog_loss_enable=False, font_path=None, *args, **kwargs): self.fid_eval = fid_eval self.visualize = visualize self.text_prior_enable = text_prior_enable self.recog_loss_enable = recog_loss_enable self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config 
== '__is_unconditional__': conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True self.image_height = image_height self.image_width = image_width self.stn = STN_enable if self.stn: self.tps_inputsize = [image_height // down_sample_rate, image_width // down_sample_rate] tps_outputsize = [image_height // down_sample_rate, image_width // down_sample_rate] num_control_points = 20 tps_margins = [0.05, 0.05] self.tps = TPSSpatialTransformer( output_image_size=tuple(tps_outputsize), num_control_points=num_control_points, margins=tuple(tps_margins)) self.stn_head = STNHead( in_planes=3, num_ctrlpoints=num_control_points, activation='none', input_size=self.tps_inputsize) self.standard_text = standard_text if self.standard_text: # self.VL_model = self.VisionLAN_init(VL_pretrained_path) # self.test_acc_counter = Attention_AR_counter('\ntest accuracy: ', # '/home/zhouyuxuan/latent-diffusion/dic_36.txt', False) self.font_path = font_path pygame.init() freetype.init() self.cal_psnr = ssim_psnr.calculate_psnr self.cal_ssim = ssim_psnr.SSIM() def VisionLAN_init(self, path=None): cfg = {'args': { 'strides': [(1, 1), (2, 2), (2, 2), (2, 2), (1, 1), (1, 1)], 'input_shape': [3, 64, 256], # C x H x W }, 'init_state_dict': '/home/zhouyuxuan/latent-diffusion/visionlan.pth', } model_VL = VisionLAN(**cfg['args']) model_path = cfg['init_state_dict'] if path is None else path print('load pre_trained VisionLAN model from %s' % model_path) model_VL = model_VL.to(self.device) model_VL = nn.DataParallel(model_VL) if cfg['init_state_dict'] != None: fe_state_dict_ori = torch.load(model_path) fe_state_dict = OrderedDict() for k, v in fe_state_dict_ori.items(): if 'module' not in k: k = 'module.' 
+ k else: k = k.replace('features.module.', 'module.features.') fe_state_dict[k] = v model_dict_fe = model_VL.state_dict() state_dict_fe = {k: v for k, v in fe_state_dict.items() if k in model_dict_fe.keys()} model_dict_fe.update(state_dict_fe) model_VL.load_state_dict(model_dict_fe) return model_VL def parse_visionlan_data(self, imgs_input): imgs_input = transforms.ToPILImage()(imgs_input).convert('RGB') imgs_input = cv2.resize(np.array(imgs_input), (256, 64)) imgs_input = transforms.ToTensor()(imgs_input).unsqueeze(0) imgs_input = imgs_input.to(self.device) return imgs_input def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids def on_save_checkpoint(self, checkpoint): if not isinstance(self.cond_stage_model, torch.nn.Identity): self.cond_stage_model.save_state_dict( '/home/zhouyuxuan/latent-diffusion/crnn_ckpt/', self.current_epoch) @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer('scale_factor', 1. / z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append(self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, 
n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # print(x.shape) # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[1] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = 
self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) # print('weighting',weighting.shape,Ly,Lx) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[1] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if self.model.conditioning_key is not None: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox']: xc = batch[cond_key] elif cond_key == 'class_label': xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: # if not self.cond_stage_trainable or force_c_encode: if isinstance(xc, dict) or isinstance(xc, list): # import pudb; pudb.set_trace() c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) if self.text_prior_enable: c = self.get_additional_cond(xc, c) # c = {'c_concat': [xc], 'c_crossattn': [c]} else: c = xc if bs is not None: if isinstance(c, dict): for k, v in c.items(): c[k] = [v[0][:bs]] else: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c] # print('fuck',c.shape) if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. 
(64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape print('decode z shape', z.shape) if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") print(ks, stride, uf) fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [self.first_stage_model.decode(z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1])] else: output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) # same as above but without decorator def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [self.first_stage_model.decode(z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1])] else: output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. 
reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) df = self.split_input_params["vqf"] self.split_input_params['original_image_size'] = x.shape[-2:] bs, nc, h, w = x.shape print('encode x shape', x.shape) print('ks', ks, 'stride', stride, 'df', df) if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df) z = unfold(x) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) print('encode z shape', z.shape) output_list = [self.first_stage_model.encode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization return decoded else: return self.first_stage_model.encode(x) else: return self.first_stage_model.encode(x) def on_validation_start(self) -> None: print(f'******************************in validation {self.current_epoch}') def validation_step(self, batch, batch_idx): # print('******************************in validation') _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) if self.fid_eval and self.current_epoch % 10 == 0: results = self.recognize_sample(batch, N=114514, inpaint=False) rec_image = results['samples'] target = batch[self.first_stage_key] target = rearrange(target, 'b h w c -> b c h w') cond = batch[self.cond_stage_key] cond = rearrange(cond, 'b h w c -> b c h w') if self.visualize: batchlen = rec_image.shape[0] rc = int(math.sqrt(batchlen)) f, axs = plt.subplots(rc, rc, figsize=(16, 4), sharex=True, sharey=True) plt.subplots_adjust(wspace=0, hspace=0) print(len(axs), batchlen, int(math.sqrt(batchlen))) assert len(axs) ** 2 == batchlen for i in range(batchlen): axs[i // rc, i % rc].set_xticklabels([]) axs[i // rc, i % rc].set_yticklabels([]) axs[i // rc, i % rc].set_aspect('equal') axs[i // rc, i % rc].imshow(rec_image[i, :3, :, :].cpu().numpy().transpose(1, 2, 0)) axs[i // rc, i % rc].axis('off') plt.savefig(f'/home/zhouyuxuan/res/sample_{batch_idx}.jpg') plt.cla() f, axs = plt.subplots(rc, rc, figsize=(16, 4), sharex=True, sharey=True) 
plt.subplots_adjust(wspace=0, hspace=0) for i in range(batchlen): axs[i // rc, i % rc].imshow(target[i, :3, :, :].cpu().numpy().transpose(1, 2, 0)) axs[i // rc, i % rc].axis('off') plt.savefig(f'/home/zhouyuxuan/res/target_{batch_idx}.jpg') plt.cla() f, axs = plt.subplots(rc, rc, figsize=(16, 4), sharex=True, sharey=True) plt.subplots_adjust(wspace=0, hspace=0) for i in range(batchlen): axs[i // rc, i % rc].imshow(cond[i, :3, :, :].cpu().numpy().transpose(1, 2, 0)) axs[i // rc, i % rc].axis('off') plt.savefig(f'/home/zhouyuxuan/res/input_{batch_idx}.jpg') PSNR = self.cal_psnr(rec_image[:, :3], target[:, :3]) SSIM = self.cal_ssim(rec_image[:, :3], target[:, :3]) self.log_dict({'PSNR': PSNR, 'SSIM': SSIM}, prog_bar=False, logger=True, on_step=False, on_epoch=True) def shared_step(self, batch, **kwargs): # print('*******************************************************batch',batch['image'].shape) # print('*******************************************************batch',batch['image'].shape) # if hasattr(self, "split_input_params"): # print(self.split_input_params) # else: # print('fuck') x, c = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x, c) if self.recog_loss_enable: HR = batch['image'] HR = rearrange(HR, 'b h w c -> b c h w') HR = HR.to(memory_format=torch.contiguous_format).float() LR = c label_vecs = self.get_learned_conditioning(c).permute(1, 0, 2) label_vecs_hr = self.get_learned_conditioning(HR).permute(1, 0, 2) loss_recog_distill = sem_loss(label_vecs, label_vecs_hr) * 100 # 100 loss = loss + loss_recog_distill loss_dict.update({f'loss_recog': loss_recog_distill}) # return loss + loss_recog_distill, loss_dict # # else: return loss, loss_dict def get_additional_cond(self, c, tp): if self.stn: _, ctrl_points_c = self.stn_head(c) c, _ = self.tps(c, ctrl_points_c) if self.standard_text: x_q = torch.empty(1, 2, c.shape[2], c.shape[3]) # prob_lr = torch.empty(1, 25, 37) rec_results = get_string_crnn(tp.permute(1, 0, 2), False) for i in range(c.shape[0]): # visionlan_dict_lr = self.parse_visionlan_data(c[i, :3, :, :]) # target = '' # label_lr, label_length = self.VL_model(visionlan_dict_lr, target, '', False) # pred_str_lr, pred_prob = self.test_acc_counter.convert(label_lr, label_length) # s = pred_str_lr[0] # prob_lr = torch.cat([prob_lr, pred_prob], dim=0) s = rec_results[i] if s == "" or type(s) == torch.Tensor: s = "\t" lower_case = s.lower() upper_case = s.upper() i_t_lower = make_standard_text(self.font_path, lower_case, (c.shape[2], c.shape[3])) i_t_lower_tensor = torch.from_numpy(i_t_lower).unsqueeze(0).unsqueeze(0) i_t_upper = make_standard_text(self.font_path, upper_case, (c.shape[2], c.shape[3])) i_t_upper_tensor = torch.from_numpy(i_t_upper).unsqueeze(0).unsqueeze(0) i_t_tensor = torch.cat([i_t_lower_tensor, i_t_upper_tensor], dim=1) x_q = torch.cat([x_q, i_t_tensor], dim=0) x_q = x_q[1:] # prob_lr = prob_lr[1:] x_q = x_q.to(self.device) # prob_lr = prob_lr.to(self.device) c = torch.cat([c, x_q], dim=1) return {'c_concat': [c], 'c_crossattn': [tp]} def forward(self, x, c, *args, **kwargs): t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() if self.model.conditioning_key is not None: assert c is not None if self.text_prior_enable and self.model.conditioning_key == 'hybrid': tp = self.get_learned_conditioning(c) c = self.get_additional_cond(c, tp) else: if self.cond_stage_trainable: c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = 
self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset def rescale_bbox(bbox): x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) w = min(bbox[2] / crop_coordinates[2], 1 - x0) h = min(bbox[3] / crop_coordinates[3], 1 - y0) return x0, y0, w, h return [rescale_bbox(b) for b in bboxes] def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is exptected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} if hasattr(self, "split_input_params"): assert len(cond) == 1 # todo can only deal with one conditioning atm assert not return_ids ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) h, w = x_noisy.shape[-2:] if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) # print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) # print("reducing stride") # print('ddpm','x_noisy shape',x_noisy.shape,'ks',ks,'stride',stride) fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride) z = unfold(x_noisy) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] if self.cond_stage_key in ["image", "LR_image", "segmentation", 'bbox_img'] and self.model.conditioning_key: # todo check for completeness c_key = next(iter(cond.keys())) # get key c = next(iter(cond.values())) # get value assert (len(c) == 1) # todo extend to list with more than one elem c = c[0] # get element c = unfold(c) c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L ) cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] elif self.cond_stage_key == 'coordinates_bbox': assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size' # assuming padding of unfold is always 0 and its dilation is always 1 n_patches_per_row = int((w - ks[0]) / stride[0] + 1) full_img_h, full_img_w = self.split_input_params['original_image_size'] # as we are operating on latents, we need the factor from the original image size to the # spatial latent size to properly rescale the crops for regenerating the bbox annotations num_downs = self.first_stage_model.encoder.num_resolutions - 1 rescale_latent = 2 ** (num_downs) # get top left postions of patches as conforming for the bbbox tokenizer, therefore we # need to rescale the tl patch coordinates to be in between (0,1) tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) for patch_nr in range(z.shape[-1])] # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) patch_limits = [(x_tl, y_tl, rescale_latent * ks[0] / full_img_w, rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates] # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] # tokenize crop coordinates for the bounding boxes of the respective patches patch_limits_tknzd = 
[torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device) for bbox in patch_limits] # list of length l with tensors of shape (1, 2) print(patch_limits_tknzd[0].shape) # cut tknzd crop position from conditioning assert isinstance(cond, dict), 'cond must be dict to be fed into model' cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) print(cut_cond.shape) adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd]) adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') print(adapted_cond.shape) adapted_cond = self.get_learned_conditioning(adapted_cond) print(adapted_cond.shape) adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1]) print(adapted_cond.shape) cond_list = [{'c_crossattn': [e]} for e in adapted_cond] else: cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient # apply model by loop over crops output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])] assert not isinstance(output_list[0], tuple) # todo cant deal with multiple model outputs check this never happens o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together x_recon = fold(o) / normalization else: x_recon = self.model(x_noisy, t, **cond) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. 
""" batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, cond, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output = self.apply_model(x_noisy, t, cond) loss_dict = {} prefix = 'train' if self.training else 'val' if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise else: raise NotImplementedError() loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) self.logvar = self.logvar.to(self.device) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) loss_dict.update({'logvar': self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) loss += (self.original_elbo_weight * loss_vlb) loss_dict.update({f'{prefix}/loss': loss}) return loss, loss_dict def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None): t_in = t model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if score_corrector is not None: assert self.parameterization == "eps" model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1., 1.) 
if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if return_codebook_ids: raise DeprecationWarning("Support dropped.") model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) if return_x0: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', total=timesteps) if verbose else reversed( range(0, timesteps)) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. 
- mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) # print(cond.shape) if self.text_prior_enable: if isinstance(cond, dict): shape = (self.channels, cond['c_concat'][0].shape[2], cond['c_concat'][0].shape[3]) elif isinstance(cond, list): shape = (self.channels, cond[0].shape[2], cond[0].shape[3]) else: shape = (self.channels, cond.shape[2], cond.shape[3]) else: shape = (self.channels, cond.shape[2], cond.shape[3]) # shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs) return samples, intermediates @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, **kwargs): use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, 
return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) # print('**********************c shape',c.shape) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption"]:
xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"])
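The p_losses method in the record above trains the denoiser against either an x0 or an eps target. A minimal sketch of the eps branch, assuming a plain linear beta schedule and a stand-in model; the per-timestep logvar weighting and VLB terms from the original are omitted:

import torch
import torch.nn.functional as F

T = 1000
betas = torch.linspace(1e-4, 2e-2, T)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

def p_losses_sketch(model, x_start, t):
    noise = torch.randn_like(x_start)                      # the "eps" target
    a_bar = alphas_cumprod[t].view(-1, 1, 1, 1)
    x_noisy = a_bar.sqrt() * x_start + (1.0 - a_bar).sqrt() * noise  # q_sample
    model_out = model(x_noisy, t)                          # predicts the noise
    return F.mse_loss(model_out, noise)                    # loss_simple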
0
2023-11-20 06:34:21+00:00
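The p_sample method above adds Gaussian noise scaled by exp(0.5 * log_var) at every step except t == 0, where nonzero_mask zeroes the noise so the final output is just the posterior mean. A self-contained sketch of that masking step:

import torch

def p_sample_step(model_mean, model_log_variance, t):
    # Zero out the noise for batch elements at t == 0, as in p_sample above.
    b = model_mean.shape[0]
    noise = torch.randn_like(model_mean)
    nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (model_mean.dim() - 1)))
    return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise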
24k
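The get_fold_unfold / decode_first_stage pair in the record above processes large latents as overlapping patches and stitches them back together. The key identity is that folding the weighted patch outputs and dividing by a fold of the weighting averages the overlapping regions. A minimal sketch with a uniform (all-ones) weighting map, where the original code uses a smoothed one:

import torch

x = torch.randn(1, 3, 64, 64)
ks, stride = (32, 32), (16, 16)

unfold = torch.nn.Unfold(kernel_size=ks, stride=stride)
fold = torch.nn.Fold(output_size=x.shape[-2:], kernel_size=ks, stride=stride)

patches = unfold(x)                    # (1, 3 * 32 * 32, L)
weighting = torch.ones_like(patches)   # the real code uses a smoothed map
normalization = fold(weighting)        # per-pixel overlap counts

# With an identity "model", stitching the patches back recovers x exactly.
x_rec = fold(patches * weighting) / normalization
assert torch.allclose(x, x_rec, atol=1e-5)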
microsoft/Project-BayesDAG
src/causica/models/visl.py
[ { "identifier": "Dataset", "path": "src/causica/datasets/dataset.py", "snippet": "class Dataset(BaseDataset):\n \"\"\"\n Class to store dense train/val/test data and masks and variables metadata.\n Note that the data and masks provided by this class are read only.\n \"\"\"\n\n def __init_...
import json import math import os import warnings import numpy as np # type: ignore import torch import torch.distributions as tdist import torch.nn.functional as F from typing import Callable, Dict, List, Optional, Tuple from torch import nn from torch.utils.data import DataLoader, TensorDataset from ..datasets.dataset import Dataset from ..datasets.variables import Variables from ..models.imodel import IModelForCausalInference from ..utils.helper_functions import to_tensors from ..utils.io_utils import save_json from ..utils.nri_utils import compute_dag_loss, get_feature_indices_per_node, kl_categorical, piecewise_linear from ..utils.training_objectives import get_input_and_scoring_masks, kl_divergence, negative_log_likelihood from .pvae_base_model import PVAEBaseModel
18,661
# 1. filling the missing values before applying the GNN-based VAE, # 2. processing the output of the GNN-based VAE (i.e. use torch.sigmoid in the binary case) types = np.array([v.type_ for v in self.variables._variables]) if (types == "binary").all(): self.var_types = "binary" elif (types == "continuous").all(): self.var_types = "continuous" elif (types == "categorical").all(): self.var_types = "categorical" else: raise ValueError("Right now all the variables need to have the same type") def _train( # type: ignore self, dataset: Dataset, report_progress_callback: Optional[Callable[[str, int, int], None]], learning_rate: float, batch_size: int, epochs: int, max_p_train_dropout: float, use_dag_loss: bool, output_variance: float, hard: bool, two_steps: bool, lambda_nll: float, lambda_kl_z: float, lambda_kl_A: float, lambda_dagloss: float, sample_count: int, ) -> Dict[str, List[float]]: """ Train the model using the given data. Args: dataset: Dataset with data and masks in processed form. train_output_dir: Path to save any training information to. report_progress_callback: Function to report model progress for API. learning_rate: Learning rate for Adam optimiser. batch_size: Size of minibatches to use. epochs: Number of epochs to train for. max_p_train_dropout: Maximum fraction of extra training features to drop for each row. 0 is none, 1 is all. use_dag_loss: Whether to use the DAG loss regularisation. output_variance: The variance for the output of the GNN based VAE. hard: Whether to use hard or soft samples for the distribution over edges (if hard=True, the edge weights are just 0/1). two_steps: Whether to use the two-step variant of VISL. That is, the first half of training uses only the forward MLP and the second half fixes the distribution over edges and only optimizes the forward and backward MLPs. lambda_nll: Lambda coefficient for the ELBO term negative-log-likelihood lambda_kl_z: Lambda coefficient for the ELBO term lambda*KL(q(z|x) || p(z)) lambda_kl_A: Lambda coefficient for the ELBO term lambda*KL(q(A) || p(A)) lambda_dagloss: Lambda coefficient for the dagloss term of the ELBO. sample_count: Number of samples to reconstruct. Returns: train_results (dictionary): training_loss, KL divergence, NLL, dag_loss, training_loss_complete """ # Put PyTorch into train mode. self.train() # Setting the hard attribute which will be used for training and testing self.hard = hard # Loading data and mask, creating results_dict data, mask = dataset.train_data_and_mask results_dict: Dict[str, List] = { metric: [] for metric in [ "training_loss", "kl_z_term", "kl_A_term", "nll_term", "dag_loss_term", "training_loss_complete", ] } # Creating the optimizer # If two_steps is True, we create a different optimizer for the second half. This optimizer does not optimize over the adjacency matrix. optimizer = torch.optim.Adam(self.parameters(), lr=learning_rate) if two_steps: named_parameters = list(self.named_parameters()) all_but_adj_matrix = [] for a in named_parameters: if a[0] != "Z_edges_logits": all_but_adj_matrix.append(a[1]) optimizer_second_half = torch.optim.Adam(all_but_adj_matrix, lr=learning_rate) # Creating the dataloader tensor_dataset = TensorDataset(*to_tensors(data, mask, device=self.device)) dataloader = DataLoader(tensor_dataset, batch_size=batch_size, shuffle=True) # If DAG loss is used, it appears after 'epochs_without_dagloss' epochs and its coefficient (lambda) grows linearly # during 10% of the total number of epochs until 1. 
This scheme is used for lambda because of empirical # reasons (DAG loss might take over the training if it is used with coefficient 1.0 from the beginning). if use_dag_loss: epochs_without_dagloss = 5 if (epochs_without_dagloss + 0.1 * epochs) > epochs: warnings.warn("max lambda will not be achieved") best_train_loss = np.nan for epoch in range(epochs): training_loss_full = 0.0 nll_term_full = 0.0 kl_z_term_full = 0.0 kl_A_term_full = 0.0 dag_loss_term_full = 0.0 training_loss_complete_full = 0.0 # Set the optimizer_to_use depending on whether we are using the two-steps variant or not. if not two_steps: optimizer_to_use = optimizer only_forward = False elif epoch < epochs // 2: optimizer_to_use = optimizer only_forward = True else: optimizer_to_use = optimizer_second_half only_forward = False for x, mask_train_batch in dataloader: # Drop additional values (same procedure as PVAE)
# This is required in python 3 to allow return types of the same class. from __future__ import annotations class VISL(PVAEBaseModel, IModelForCausalInference): """ Subclass of `models.pvae_base_model.PVAEBaseModel` representing the algorithm VISL (missing value imputation with causal discovery). Requires file <data_dir>/<dataset_name>/adj_matrix.csv to evaluate causal discovery against ground truth. """ def __init__( self, model_id: str, variables: Variables, save_dir: str, device: torch.device, gnn_iters: int, shared_init_and_final_mappings: bool, embedding_dim: int, init_prob: float, simpler: str = None, **_, ): """ Args: model_id: Unique model ID for referencing this model instance. variables: Information about variables/features used by this model. save_dir: Location to save any information about this model, including training data. device: Device to load model to. gnn_iters: Number of message passing iterations for the GNN. shared_init_and_final_mappings: Whether all the nodes should use the same MLPs for the initial and final mappings. embedding_dim: Dimensionality of the nodes embedding. init_prob: Initial probability of having edge. simpler: Choose what MLP should be simpler (options are 'forward', 'backward', or None). Specifically, 'simpler' means to divide by 10 the dimensionality of the hidden layer of the corresponding MLP (with a minimum of 10 units). """ super().__init__(model_id, variables, save_dir, device) # Define some useful attributes feature_indices_per_node, ordered_nodes = get_feature_indices_per_node(variables) with open(os.path.join(self.save_dir, "ordered_nodes.json"), "w", encoding="utf-8") as f: json.dump(ordered_nodes, f, indent=4) self.num_nodes = len(feature_indices_per_node) self.num_edges = self.num_nodes * (self.num_nodes - 1) self.input_dim = variables.num_processed_cols # Define and initialize Z_edges # The learnable parameter is Z_edges_logits. Z_edges is F.softmax(Z_edges_logits, dim=1). self.Z_edges_logits = torch.nn.Parameter( torch.stack( [ torch.full([self.num_edges], math.log(1 - init_prob)), torch.full([self.num_edges], math.log(init_prob)), ], dim=1, ).to(device) ) # Define the GNN-based VAE self.gnn_vae = GNN_based_VAE( embedding_dim=embedding_dim, skip_first=True, device=device, n_iters=gnn_iters, num_nodes=self.num_nodes, shared_init_and_final_mappings=shared_init_and_final_mappings, simpler=simpler, feature_indices_per_node=feature_indices_per_node, ) # Create rel_rec and rel_send, which codify the receiving and sending node for each edge # Shape of rel_rec and rel_send: [num_edges, num_nodes] # The second dimension is a one-hot encoding of the receiver or sender node off_diag = np.ones([self.num_nodes, self.num_nodes]) - np.eye(self.num_nodes) rel_rec = F.one_hot(torch.tensor(np.where(off_diag)[0], dtype=torch.long)) rel_send = F.one_hot(torch.tensor(np.where(off_diag)[1], dtype=torch.long)) self.rel_rec = rel_rec.type(torch.float).to(device) self.rel_send = rel_send.type(torch.float).to(device) # Define the prior over edge types (favors sparse graphs) self.log_prior = torch.log( torch.tensor([0.95, 0.05], device=device) ) # The no-edge type is the first one (recall the skip_first argument of GNN_based_VAE __init__) # Save type of variables. Used in reconstruct method for # 1. filling the missing values before applying the GNN-based VAE, # 2. processing the output of the GNN-based VAE (i.e. 
use torch.sigmoid in the binary case) types = np.array([v.type_ for v in self.variables._variables]) if (types == "binary").all(): self.var_types = "binary" elif (types == "continuous").all(): self.var_types = "continuous" elif (types == "categorical").all(): self.var_types = "categorical" else: raise ValueError("Right now all the variables need to have the same type") def _train( # type: ignore self, dataset: Dataset, report_progress_callback: Optional[Callable[[str, int, int], None]], learning_rate: float, batch_size: int, epochs: int, max_p_train_dropout: float, use_dag_loss: bool, output_variance: float, hard: bool, two_steps: bool, lambda_nll: float, lambda_kl_z: float, lambda_kl_A: float, lambda_dagloss: float, sample_count: int, ) -> Dict[str, List[float]]: """ Train the model using the given data. Args: dataset: Dataset with data and masks in processed form. train_output_dir: Path to save any training information to. report_progress_callback: Function to report model progress for API. learning_rate: Learning rate for Adam optimiser. batch_size: Size of minibatches to use. epochs: Number of epochs to train for. max_p_train_dropout: Maximum fraction of extra training features to drop for each row. 0 is none, 1 is all. use_dag_loss: Whether to use the DAG loss regularisation. output_variance: The variance for the output of the GNN based VAE. hard: Whether to use hard or soft samples for the distribution over edges (if hard=True, the edge weights are just 0/1). two_steps: Whether to use the two-step variant of VISL. That is, the first half of training uses only the forward MLP and the second half fixes the distribution over edges and only optimizes the forward and backward MLPs. lambda_nll: Lambda coefficient for the ELBO term negative-log-likelihood lambda_kl_z: Lambda coefficient for the ELBO term lambda*KL(q(z|x) || p(z)) lambda_kl_A: Lambda coefficient for the ELBO term lambda*KL(q(A) || p(A)) lambda_dagloss: Lambda coefficient for the dagloss term of the ELBO. sample_count: Number of samples to reconstruct. Returns: train_results (dictionary): training_loss, KL divergence, NLL, dag_loss, training_loss_complete """ # Put PyTorch into train mode. self.train() # Setting the hard attribute which will be used for training and testing self.hard = hard # Loading data and mask, creating results_dict data, mask = dataset.train_data_and_mask results_dict: Dict[str, List] = { metric: [] for metric in [ "training_loss", "kl_z_term", "kl_A_term", "nll_term", "dag_loss_term", "training_loss_complete", ] } # Creating the optimizer # If two_steps is True, we create a different optimizer for the second half. This optimizer does not optimize over the adjacency matrix. optimizer = torch.optim.Adam(self.parameters(), lr=learning_rate) if two_steps: named_parameters = list(self.named_parameters()) all_but_adj_matrix = [] for a in named_parameters: if a[0] != "Z_edges_logits": all_but_adj_matrix.append(a[1]) optimizer_second_half = torch.optim.Adam(all_but_adj_matrix, lr=learning_rate) # Creating the dataloader tensor_dataset = TensorDataset(*to_tensors(data, mask, device=self.device)) dataloader = DataLoader(tensor_dataset, batch_size=batch_size, shuffle=True) # If DAG loss is used, it appears after 'epochs_without_dagloss' epochs and its coefficient (lambda) grows linearly # during 10% of the total number of epochs until 1. This scheme is used for lambda because of empirical # reasons (DAG loss might take over the training if it is used with coefficient 1.0 from the beginning). 
if use_dag_loss: epochs_without_dagloss = 5 if (epochs_without_dagloss + 0.1 * epochs) > epochs: warnings.warn("max lambda will not be achieved") best_train_loss = np.nan for epoch in range(epochs): training_loss_full = 0.0 nll_term_full = 0.0 kl_z_term_full = 0.0 kl_A_term_full = 0.0 dag_loss_term_full = 0.0 training_loss_complete_full = 0.0 # Set the optimizer_to_use depending on whether we are using the two-steps variant or not. if not two_steps: optimizer_to_use = optimizer only_forward = False elif epoch < epochs // 2: optimizer_to_use = optimizer only_forward = True else: optimizer_to_use = optimizer_second_half only_forward = False for x, mask_train_batch in dataloader: # Drop additional values (same procedure as PVAE)
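The two_steps branch above builds a second optimizer that updates every parameter except the edge logits, so the learned graph distribution is frozen during the second half of training. A compact sketch of that parameter filtering, with `model` standing in for the VISL instance:

import torch

def make_second_half_optimizer(model, lr):
    # Every parameter except the adjacency logits, as in _train above.
    params = [p for name, p in model.named_parameters() if name != "Z_edges_logits"]
    return torch.optim.Adam(params, lr=lr)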
input_mask, scoring_mask = get_input_and_scoring_masks(
9
2023-11-21 12:55:08+00:00
24k
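The comments in the record above describe the DAG-loss coefficient schedule: zero for the first few epochs, then a linear ramp over 10% of the total epochs up to 1.0 (the repo imports a piecewise_linear helper for this). A hedged reimplementation of that schedule with illustrative names, not the repo's actual helper:

def dag_loss_lambda(epoch, epochs, epochs_without_dagloss=5):
    # 0 until `epochs_without_dagloss`, then ramp to 1 over 10% of training.
    ramp = 0.1 * epochs
    if epoch < epochs_without_dagloss:
        return 0.0
    return min(1.0, (epoch - epochs_without_dagloss) / ramp)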
KU-Leuven-Geomatics/geomapi
geomapi/nodes/panonode.py
[ { "identifier": "Node", "path": "geomapi/nodes/node.py", "snippet": "class Node:\n def __init__(self, subject: URIRef = None,\n graph: Graph = None,\n graphPath: str = None, \n name:str=None,\n ...
from ast import Raise from distutils import extension from pathlib import Path from typing import Tuple from rdflib import Graph, URIRef from scipy.spatial.transform import Rotation as R from geomapi.nodes import Node from geomapi.nodes import ImageNode import xml.etree.ElementTree as ET import cv2 import PIL import numpy as np import os import open3d as o3d import math import uuid import matplotlib.pyplot as plt import geomapi.utils as ut import geomapi.utils.geometryutils as gmu import geomapi.utils.imageutils as it
15,659
""" Panonode is a Python Class to govern the data and metadata of panoramic data (OpenCV, PIL). This node builds upon the OpenCV and PIL API for the image definitions. It directly inherits from Node. Be sure to check the properties defined in the above classes to initialise the Node. """ #IMPORT PACKAGES #IMPORT MODULES
""" Panonode is a Python Class to govern the data and metadata of panoramic data (OpenCV, PIL). This node builds upon the OpenCV and PIL API for the image definitions. It directly inherits from Node. Be sure to check the properties defined in the above classes to initialise the Node. """ #IMPORT PACKAGES #IMPORT MODULES
class PanoNode(ImageNode):
1
2023-11-23 08:15:01+00:00
24k
Yifei-Y/Openset-RCNN
openset_rcnn/evaluation/os_coco_evaluation.py
[ { "identifier": "GRASPNET_KNOWN_IDS", "path": "openset_rcnn/data/graspnet_meta.py", "snippet": "GRASPNET_KNOWN_IDS = [graspnet_known_name_id_dic[name_cat] for name_cat in GRASPNET_KNOWN_CATEGORIES]" }, { "identifier": "GRASPNET_KNOWN_CATEGORIES", "path": "openset_rcnn/data/graspnet_meta.py",...
import contextlib import copy import io import itertools import json import logging import numpy as np import os import pickle import pycocotools.mask as mask_util import torch import detectron2.utils.comm as comm from collections import OrderedDict from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval from tabulate import tabulate from detectron2.config import CfgNode from detectron2.data import MetadataCatalog from detectron2.data.datasets.coco import convert_to_coco_json from detectron2.structures import Boxes, BoxMode, pairwise_iou from detectron2.utils.file_io import PathManager from detectron2.utils.logger import create_small_table from detectron2.evaluation.evaluator import DatasetEvaluator from detectron2.evaluation.coco_evaluation import instances_to_coco_json from openset_rcnn.data.graspnet_meta import GRASPNET_KNOWN_IDS, GRASPNET_KNOWN_CATEGORIES from .os_cocoeval import OpensetCOCOEval
14,921
# Copyright (c) Facebook, Inc. and its affiliates. class OpensetCOCOEvaluator(DatasetEvaluator): """ Evaluate AR for object proposals, AP for instance detection/segmentation, AP for keypoint detection outputs using COCO's metrics. See http://cocodataset.org/#detection-eval and http://cocodataset.org/#keypoints-eval to understand its metrics. The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means the metric cannot be computed (e.g. due to no predictions made). In addition to COCO, this evaluator is able to support any bounding box detection, instance segmentation, or keypoint detection dataset. """ def __init__( self, dataset_name, eval_type, tasks=None, distributed=True, output_dir=None, *, max_dets_per_image=None, use_fast_impl=True, kpt_oks_sigmas=(), ): """ Args: dataset_name (str): name of the dataset to be evaluated. It must have either the following corresponding metadata: "json_file": the path to the COCO format annotation Or it must be in detectron2's standard dataset format so it can be converted to COCO format automatically. tasks (tuple[str]): tasks that can be evaluated under the given configuration. A task is one of "bbox", "segm", "keypoints". By default, will infer this automatically from predictions. distributed (True): if True, will collect results from all ranks and run evaluation in the main process. Otherwise, will only evaluate the results in the current process. output_dir (str): optional, an output directory to dump all results predicted on the dataset. The dump contains two files: 1. "instances_predictions.pth" a file that can be loaded with `torch.load` and contains all the results in the format they are produced by the model. 2. "coco_instances_results.json" a json file in COCO's result format. max_dets_per_image (list[int]): limit on the maximum number of detections per image. use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP. Although the results should be very close to the official implementation in COCO API, it is still recommended to compute results with the official API for use in papers. The faster implementation also uses more RAM. """ self._logger = logging.getLogger(__name__) self._distributed = distributed self._output_dir = output_dir self._use_fast_impl = use_fast_impl self._max_dets_per_image = max_dets_per_image if tasks is not None and isinstance(tasks, CfgNode): kpt_oks_sigmas = ( tasks.TEST.KEYPOINT_OKS_SIGMAS if not kpt_oks_sigmas else kpt_oks_sigmas ) self._logger.warn( "COCO Evaluator instantiated using config, this is deprecated behavior." " Please pass in explicit arguments instead." ) self._tasks = None # Infering it from predictions should be better else: self._tasks = tasks self._cpu_device = torch.device("cpu") self.known_names = GRASPNET_KNOWN_CATEGORIES
# Copyright (c) Facebook, Inc. and its affiliates. class OpensetCOCOEvaluator(DatasetEvaluator): """ Evaluate AR for object proposals, AP for instance detection/segmentation, AP for keypoint detection outputs using COCO's metrics. See http://cocodataset.org/#detection-eval and http://cocodataset.org/#keypoints-eval to understand its metrics. The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means the metric cannot be computed (e.g. due to no predictions made). In addition to COCO, this evaluator is able to support any bounding box detection, instance segmentation, or keypoint detection dataset. """ def __init__( self, dataset_name, eval_type, tasks=None, distributed=True, output_dir=None, *, max_dets_per_image=None, use_fast_impl=True, kpt_oks_sigmas=(), ): """ Args: dataset_name (str): name of the dataset to be evaluated. It must have either the following corresponding metadata: "json_file": the path to the COCO format annotation Or it must be in detectron2's standard dataset format so it can be converted to COCO format automatically. tasks (tuple[str]): tasks that can be evaluated under the given configuration. A task is one of "bbox", "segm", "keypoints". By default, will infer this automatically from predictions. distributed (True): if True, will collect results from all ranks and run evaluation in the main process. Otherwise, will only evaluate the results in the current process. output_dir (str): optional, an output directory to dump all results predicted on the dataset. The dump contains two files: 1. "instances_predictions.pth" a file that can be loaded with `torch.load` and contains all the results in the format they are produced by the model. 2. "coco_instances_results.json" a json file in COCO's result format. max_dets_per_image (list[int]): limit on the maximum number of detections per image. use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP. Although the results should be very close to the official implementation in COCO API, it is still recommended to compute results with the official API for use in papers. The faster implementation also uses more RAM. """ self._logger = logging.getLogger(__name__) self._distributed = distributed self._output_dir = output_dir self._use_fast_impl = use_fast_impl self._max_dets_per_image = max_dets_per_image if tasks is not None and isinstance(tasks, CfgNode): kpt_oks_sigmas = ( tasks.TEST.KEYPOINT_OKS_SIGMAS if not kpt_oks_sigmas else kpt_oks_sigmas ) self._logger.warn( "COCO Evaluator instantiated using config, this is deprecated behavior." " Please pass in explicit arguments instead." ) self._tasks = None # Infering it from predictions should be better else: self._tasks = tasks self._cpu_device = torch.device("cpu") self.known_names = GRASPNET_KNOWN_CATEGORIES
self.known_ids = GRASPNET_KNOWN_IDS
0
2023-11-21 01:47:01+00:00
24k
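The evaluator in the record above follows detectron2's DatasetEvaluator protocol (reset, process, evaluate). A hedged usage sketch; the dataset name, the eval_type value, and the batched inputs/outputs are placeholders, not values taken from the repo:

def run_evaluation(batched_inputs, model_outputs):
    evaluator = OpensetCOCOEvaluator(
        "graspnet_test",        # placeholder dataset name
        eval_type="known",      # placeholder eval_type value
        distributed=False,
        output_dir="./eval_out",
    )
    evaluator.reset()
    for inputs, outputs in zip(batched_inputs, model_outputs):
        evaluator.process(inputs, outputs)
    return evaluator.evaluate()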
jiawei-ren/dreamgaussian4d
diffusers/src/diffusers/models/unet_3d_blocks.py
[ { "identifier": "is_torch_version", "path": "diffusers/src/diffusers/utils/import_utils.py", "snippet": "def is_torch_version(operation: str, version: str):\n \"\"\"\n Args:\n Compares the current PyTorch version to a given reference with an operation.\n operation (`str`):\n A...
from typing import Any, Dict, Optional, Tuple, Union from torch import nn from ..utils import is_torch_version from ..utils.torch_utils import apply_freeu from .attention import Attention from .dual_transformer_2d import DualTransformer2DModel from .resnet import ( Downsample2D, ResnetBlock2D, SpatioTemporalResBlock, TemporalConvLayer, Upsample2D, ) from .transformer_2d import Transformer2DModel from .transformer_temporal import ( TransformerSpatioTemporalModel, TransformerTemporalModel, ) import torch
19,280
name="op", ) ] ) else: self.downsamplers = None self.gradient_checkpointing = False def forward( self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor] = None, num_frames: int = 1, ) -> Union[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]: output_states = () for resnet, temp_conv in zip(self.resnets, self.temp_convs): hidden_states = resnet(hidden_states, temb) hidden_states = temp_conv(hidden_states, num_frames=num_frames) output_states += (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) output_states += (hidden_states,) return hidden_states, output_states class CrossAttnUpBlock3D(nn.Module): def __init__( self, in_channels: int, out_channels: int, prev_output_channel: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, num_attention_heads: int = 1, cross_attention_dim: int = 1280, output_scale_factor: float = 1.0, add_upsample: bool = True, dual_cross_attention: bool = False, use_linear_projection: bool = False, only_cross_attention: bool = False, upcast_attention: bool = False, resolution_idx: Optional[int] = None, ): super().__init__() resnets = [] temp_convs = [] attentions = [] temp_attentions = [] self.has_cross_attention = True self.num_attention_heads = num_attention_heads for i in range(num_layers): res_skip_channels = in_channels if (i == num_layers - 1) else out_channels resnet_in_channels = prev_output_channel if i == 0 else out_channels resnets.append( ResnetBlock2D( in_channels=resnet_in_channels + res_skip_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) temp_convs.append( TemporalConvLayer( out_channels, out_channels, dropout=0.1, norm_num_groups=resnet_groups, ) ) attentions.append( Transformer2DModel( out_channels // num_attention_heads, num_attention_heads, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, ) ) temp_attentions.append( TransformerTemporalModel( out_channels // num_attention_heads, num_attention_heads, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, ) ) self.resnets = nn.ModuleList(resnets) self.temp_convs = nn.ModuleList(temp_convs) self.attentions = nn.ModuleList(attentions) self.temp_attentions = nn.ModuleList(temp_attentions) if add_upsample:
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def get_down_block( down_block_type: str, num_layers: int, in_channels: int, out_channels: int, temb_channels: int, add_downsample: bool, resnet_eps: float, resnet_act_fn: str, num_attention_heads: int, resnet_groups: Optional[int] = None, cross_attention_dim: Optional[int] = None, downsample_padding: Optional[int] = None, dual_cross_attention: bool = False, use_linear_projection: bool = True, only_cross_attention: bool = False, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", temporal_num_attention_heads: int = 8, temporal_max_seq_length: int = 32, transformer_layers_per_block: int = 1, ) -> Union[ "DownBlock3D", "CrossAttnDownBlock3D", "DownBlockMotion", "CrossAttnDownBlockMotion", "DownBlockSpatioTemporal", "CrossAttnDownBlockSpatioTemporal", ]: if down_block_type == "DownBlock3D": return DownBlock3D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, resnet_time_scale_shift=resnet_time_scale_shift, ) elif down_block_type == "CrossAttnDownBlock3D": if cross_attention_dim is None: raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock3D") return CrossAttnDownBlock3D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, ) if down_block_type == "DownBlockMotion": return DownBlockMotion( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, resnet_time_scale_shift=resnet_time_scale_shift, temporal_num_attention_heads=temporal_num_attention_heads, temporal_max_seq_length=temporal_max_seq_length, ) elif down_block_type == "CrossAttnDownBlockMotion": if cross_attention_dim is None: raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlockMotion") return CrossAttnDownBlockMotion( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, 
dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, temporal_num_attention_heads=temporal_num_attention_heads, temporal_max_seq_length=temporal_max_seq_length, ) elif down_block_type == "DownBlockSpatioTemporal": # added for SDV return DownBlockSpatioTemporal( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, ) elif down_block_type == "CrossAttnDownBlockSpatioTemporal": # added for SDV if cross_attention_dim is None: raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlockSpatioTemporal") return CrossAttnDownBlockSpatioTemporal( in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, num_layers=num_layers, transformer_layers_per_block=transformer_layers_per_block, add_downsample=add_downsample, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, ) raise ValueError(f"{down_block_type} does not exist.") def get_up_block( up_block_type: str, num_layers: int, in_channels: int, out_channels: int, prev_output_channel: int, temb_channels: int, add_upsample: bool, resnet_eps: float, resnet_act_fn: str, num_attention_heads: int, resolution_idx: Optional[int] = None, resnet_groups: Optional[int] = None, cross_attention_dim: Optional[int] = None, dual_cross_attention: bool = False, use_linear_projection: bool = True, only_cross_attention: bool = False, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", temporal_num_attention_heads: int = 8, temporal_cross_attention_dim: Optional[int] = None, temporal_max_seq_length: int = 32, transformer_layers_per_block: int = 1, dropout: float = 0.0, ) -> Union[ "UpBlock3D", "CrossAttnUpBlock3D", "UpBlockMotion", "CrossAttnUpBlockMotion", "UpBlockSpatioTemporal", "CrossAttnUpBlockSpatioTemporal", ]: if up_block_type == "UpBlock3D": return UpBlock3D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, resnet_time_scale_shift=resnet_time_scale_shift, resolution_idx=resolution_idx, ) elif up_block_type == "CrossAttnUpBlock3D": if cross_attention_dim is None: raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock3D") return CrossAttnUpBlock3D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, resolution_idx=resolution_idx, ) if up_block_type == "UpBlockMotion": return UpBlockMotion( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, resnet_time_scale_shift=resnet_time_scale_shift, resolution_idx=resolution_idx, 
temporal_num_attention_heads=temporal_num_attention_heads, temporal_max_seq_length=temporal_max_seq_length, ) elif up_block_type == "CrossAttnUpBlockMotion": if cross_attention_dim is None: raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlockMotion") return CrossAttnUpBlockMotion( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, resolution_idx=resolution_idx, temporal_num_attention_heads=temporal_num_attention_heads, temporal_max_seq_length=temporal_max_seq_length, ) elif up_block_type == "UpBlockSpatioTemporal": # added for SDV return UpBlockSpatioTemporal( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, resolution_idx=resolution_idx, add_upsample=add_upsample, ) elif up_block_type == "CrossAttnUpBlockSpatioTemporal": # added for SDV if cross_attention_dim is None: raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlockSpatioTemporal") return CrossAttnUpBlockSpatioTemporal( in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, num_layers=num_layers, transformer_layers_per_block=transformer_layers_per_block, add_upsample=add_upsample, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, resolution_idx=resolution_idx, ) raise ValueError(f"{up_block_type} does not exist.") class UNetMidBlock3DCrossAttn(nn.Module): def __init__( self, in_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, num_attention_heads: int = 1, output_scale_factor: float = 1.0, cross_attention_dim: int = 1280, dual_cross_attention: bool = False, use_linear_projection: bool = True, upcast_attention: bool = False, ): super().__init__() self.has_cross_attention = True self.num_attention_heads = num_attention_heads resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) # there is always at least one resnet resnets = [ ResnetBlock2D( in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ] temp_convs = [ TemporalConvLayer( in_channels, in_channels, dropout=0.1, norm_num_groups=resnet_groups, ) ] attentions = [] temp_attentions = [] for _ in range(num_layers): attentions.append( Transformer2DModel( in_channels // num_attention_heads, num_attention_heads, in_channels=in_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, ) ) temp_attentions.append( TransformerTemporalModel( in_channels // num_attention_heads, num_attention_heads, 
in_channels=in_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, ) ) resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) temp_convs.append( TemporalConvLayer( in_channels, in_channels, dropout=0.1, norm_num_groups=resnet_groups, ) ) self.resnets = nn.ModuleList(resnets) self.temp_convs = nn.ModuleList(temp_convs) self.attentions = nn.ModuleList(attentions) self.temp_attentions = nn.ModuleList(temp_attentions) def forward( self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, num_frames: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, ) -> torch.FloatTensor: hidden_states = self.resnets[0](hidden_states, temb) hidden_states = self.temp_convs[0](hidden_states, num_frames=num_frames) for attn, temp_attn, resnet, temp_conv in zip( self.attentions, self.temp_attentions, self.resnets[1:], self.temp_convs[1:] ): hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0] hidden_states = temp_attn( hidden_states, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0] hidden_states = resnet(hidden_states, temb) hidden_states = temp_conv(hidden_states, num_frames=num_frames) return hidden_states class CrossAttnDownBlock3D(nn.Module): def __init__( self, in_channels: int, out_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, num_attention_heads: int = 1, cross_attention_dim: int = 1280, output_scale_factor: float = 1.0, downsample_padding: int = 1, add_downsample: bool = True, dual_cross_attention: bool = False, use_linear_projection: bool = False, only_cross_attention: bool = False, upcast_attention: bool = False, ): super().__init__() resnets = [] attentions = [] temp_attentions = [] temp_convs = [] self.has_cross_attention = True self.num_attention_heads = num_attention_heads for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) temp_convs.append( TemporalConvLayer( out_channels, out_channels, dropout=0.1, norm_num_groups=resnet_groups, ) ) attentions.append( Transformer2DModel( out_channels // num_attention_heads, num_attention_heads, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, ) ) temp_attentions.append( TransformerTemporalModel( out_channels // num_attention_heads, num_attention_heads, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, ) ) self.resnets = 
nn.ModuleList(resnets) self.temp_convs = nn.ModuleList(temp_convs) self.attentions = nn.ModuleList(attentions) self.temp_attentions = nn.ModuleList(temp_attentions) if add_downsample: self.downsamplers = nn.ModuleList( [ Downsample2D( out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op", ) ] ) else: self.downsamplers = None self.gradient_checkpointing = False def forward( self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, num_frames: int = 1, cross_attention_kwargs: Dict[str, Any] = None, ) -> Union[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]: # TODO(Patrick, William) - attention mask is not used output_states = () for resnet, temp_conv, attn, temp_attn in zip( self.resnets, self.temp_convs, self.attentions, self.temp_attentions ): hidden_states = resnet(hidden_states, temb) hidden_states = temp_conv(hidden_states, num_frames=num_frames) hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0] hidden_states = temp_attn( hidden_states, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0] output_states += (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) output_states += (hidden_states,) return hidden_states, output_states class DownBlock3D(nn.Module): def __init__( self, in_channels: int, out_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, output_scale_factor: float = 1.0, add_downsample: bool = True, downsample_padding: int = 1, ): super().__init__() resnets = [] temp_convs = [] for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) temp_convs.append( TemporalConvLayer( out_channels, out_channels, dropout=0.1, norm_num_groups=resnet_groups, ) ) self.resnets = nn.ModuleList(resnets) self.temp_convs = nn.ModuleList(temp_convs) if add_downsample: self.downsamplers = nn.ModuleList( [ Downsample2D( out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op", ) ] ) else: self.downsamplers = None self.gradient_checkpointing = False def forward( self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor] = None, num_frames: int = 1, ) -> Union[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]: output_states = () for resnet, temp_conv in zip(self.resnets, self.temp_convs): hidden_states = resnet(hidden_states, temb) hidden_states = temp_conv(hidden_states, num_frames=num_frames) output_states += (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) output_states += (hidden_states,) return hidden_states, output_states class CrossAttnUpBlock3D(nn.Module): def __init__( self, in_channels: int, out_channels: int, prev_output_channel: int, temb_channels: int, dropout: 
float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, num_attention_heads: int = 1, cross_attention_dim: int = 1280, output_scale_factor: float = 1.0, add_upsample: bool = True, dual_cross_attention: bool = False, use_linear_projection: bool = False, only_cross_attention: bool = False, upcast_attention: bool = False, resolution_idx: Optional[int] = None, ): super().__init__() resnets = [] temp_convs = [] attentions = [] temp_attentions = [] self.has_cross_attention = True self.num_attention_heads = num_attention_heads for i in range(num_layers): res_skip_channels = in_channels if (i == num_layers - 1) else out_channels resnet_in_channels = prev_output_channel if i == 0 else out_channels resnets.append( ResnetBlock2D( in_channels=resnet_in_channels + res_skip_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) temp_convs.append( TemporalConvLayer( out_channels, out_channels, dropout=0.1, norm_num_groups=resnet_groups, ) ) attentions.append( Transformer2DModel( out_channels // num_attention_heads, num_attention_heads, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, ) ) temp_attentions.append( TransformerTemporalModel( out_channels // num_attention_heads, num_attention_heads, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, ) ) self.resnets = nn.ModuleList(resnets) self.temp_convs = nn.ModuleList(temp_convs) self.attentions = nn.ModuleList(attentions) self.temp_attentions = nn.ModuleList(temp_attentions) if add_upsample:
self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])
8
2023-12-28 08:17:40+00:00
24k
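The spatio-temporal blocks above share one convention: spatial layers (ResnetBlock2D, Transformer2DModel, Downsample2D) see video frames folded into the batch dimension, and only the temporal layers use the `num_frames` argument to unfold them and mix information across time. Below is a minimal sketch of that frames-in-batch round trip, assuming toy shapes and a plain `Conv3d` as a stand-in for the actual `TemporalConvLayer`:

```python
import torch

batch, frames, channels, h, w = 2, 8, 16, 32, 32
# Spatial layers receive frames folded into the batch: (B * F, C, H, W)
hidden = torch.randn(batch * frames, channels, h, w)

# Unfold for a temporal layer: (B * F, C, H, W) -> (B, C, F, H, W)
x = hidden.reshape(batch, frames, channels, h, w).permute(0, 2, 1, 3, 4)
temporal = torch.nn.Conv3d(channels, channels, kernel_size=(3, 1, 1), padding=(1, 0, 0))
x = temporal(x)  # mixes across the frame axis only

# Fold back so the next 2D block again sees frames as batch entries
hidden = x.permute(0, 2, 1, 3, 4).reshape(batch * frames, channels, h, w)
print(hidden.shape)  # torch.Size([16, 16, 32, 32])
```

This is why every `forward` above threads `num_frames` through each `temp_conv` and `temp_attn` call while the 2D resnets and attention layers never receive it.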
FoundationVision/UniRef
detectron2/utils/visualizer.py
[ { "identifier": "MetadataCatalog", "path": "detectron2/data/catalog.py", "snippet": "class _DatasetCatalog(UserDict):\nclass Metadata(types.SimpleNamespace):\nclass _MetadataCatalog(UserDict):\n def register(self, name, func):\n def get(self, name):\n def list(self) -> List[str]:\n def remov...
import colorsys import logging import math import cv2 import matplotlib as mpl import matplotlib.colors as mplc import matplotlib.figure as mplfigure import numpy as np import pycocotools.mask as mask_util import torch from enum import Enum, unique from detectron2.data import MetadataCatalog from detectron2.structures import ( BitMasks, Boxes, BoxMode, Keypoints, PolygonMasks, RotatedBoxes, ) from detectron2.utils.file_io import PathManager from matplotlib.backends.backend_agg import FigureCanvasAgg from PIL import Image from .colormap import random_color from panopticapi.utils import rgb2id
16,641
dic (dict): annotation/segmentation data of one image, in Detectron2 Dataset format. Returns: output (VisImage): image object with visualizations. """ annos = dic.get("annotations", None) if annos: if "segmentation" in annos[0]: masks = [x["segmentation"] for x in annos] else: masks = None if "keypoints" in annos[0]: keypts = [x["keypoints"] for x in annos] keypts = np.array(keypts).reshape(len(annos), -1, 3) else: keypts = None boxes = [ BoxMode.convert(x["bbox"], x["bbox_mode"], BoxMode.XYXY_ABS) if len(x["bbox"]) == 4 else x["bbox"] for x in annos ] colors = None category_ids = [x["category_id"] for x in annos] if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"): colors = [ self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in category_ids ] names = self.metadata.get("thing_classes", None) labels = _create_text_labels( category_ids, scores=None, class_names=names, is_crowd=[x.get("iscrowd", 0) for x in annos], ) self.overlay_instances( labels=labels, boxes=boxes, masks=masks, keypoints=keypts, assigned_colors=colors ) sem_seg = dic.get("sem_seg", None) if sem_seg is None and "sem_seg_file_name" in dic: with PathManager.open(dic["sem_seg_file_name"], "rb") as f: sem_seg = Image.open(f) sem_seg = np.asarray(sem_seg, dtype="uint8") if sem_seg is not None: self.draw_sem_seg(sem_seg, area_threshold=0, alpha=0.5) pan_seg = dic.get("pan_seg", None) if pan_seg is None and "pan_seg_file_name" in dic: with PathManager.open(dic["pan_seg_file_name"], "rb") as f: pan_seg = Image.open(f) pan_seg = np.asarray(pan_seg) pan_seg = rgb2id(pan_seg) if pan_seg is not None: segments_info = dic["segments_info"] pan_seg = torch.tensor(pan_seg) self.draw_panoptic_seg(pan_seg, segments_info, area_threshold=0, alpha=0.5) return self.output def overlay_instances( self, *, boxes=None, labels=None, masks=None, keypoints=None, assigned_colors=None, alpha=0.5, ): """ Args: boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`, or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image, or a :class:`RotatedBoxes`, or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format for the N objects in a single image, labels (list[str]): the text to be displayed for each instance. masks (masks-like object): Supported types are: * :class:`detectron2.structures.PolygonMasks`, :class:`detectron2.structures.BitMasks`. * list[list[ndarray]]: contains the segmentation masks for all objects in one image. The first level of the list corresponds to individual instances. The second level to all the polygon that compose the instance, and the third level to the polygon coordinates. The third level should have the format of [x0, y0, x1, y1, ..., xn, yn] (n >= 3). * list[ndarray]: each ndarray is a binary mask of shape (H, W). * list[dict]: each dict is a COCO-style RLE. keypoints (Keypoint or array like): an array-like object of shape (N, K, 3), where the N is the number of instances and K is the number of keypoints. The last dimension corresponds to (x, y, visibility or score). assigned_colors (list[matplotlib.colors]): a list of colors, where each color corresponds to each mask or box in the image. Refer to 'matplotlib.colors' for full list of formats that the colors are accepted in. Returns: output (VisImage): image object with visualizations. 
""" num_instances = 0 if boxes is not None: boxes = self._convert_boxes(boxes) num_instances = len(boxes) if masks is not None: masks = self._convert_masks(masks) if num_instances: assert len(masks) == num_instances else: num_instances = len(masks) if keypoints is not None: if num_instances: assert len(keypoints) == num_instances else: num_instances = len(keypoints) keypoints = self._convert_keypoints(keypoints) if labels is not None: assert len(labels) == num_instances if assigned_colors is None:
# Copyright (c) Facebook, Inc. and its affiliates. logger = logging.getLogger(__name__) __all__ = ["ColorMode", "VisImage", "Visualizer"] _SMALL_OBJECT_AREA_THRESH = 1000 _LARGE_MASK_AREA_THRESH = 120000 _OFF_WHITE = (1.0, 1.0, 240.0 / 255) _BLACK = (0, 0, 0) _RED = (1.0, 0, 0) _KEYPOINT_THRESHOLD = 0.05 @unique class ColorMode(Enum): """ Enum of different color modes to use for instance visualizations. """ IMAGE = 0 """ Picks a random color for every instance and overlay segmentations with low opacity. """ SEGMENTATION = 1 """ Let instances of the same category have similar colors (from metadata.thing_colors), and overlay them with high opacity. This provides more attention on the quality of segmentation. """ IMAGE_BW = 2 """ Same as IMAGE, but convert all areas without masks to gray-scale. Only available for drawing per-instance mask predictions. """ class GenericMask: """ Attribute: polygons (list[ndarray]): list[ndarray]: polygons for this mask. Each ndarray has format [x, y, x, y, ...] mask (ndarray): a binary mask """ def __init__(self, mask_or_polygons, height, width): self._mask = self._polygons = self._has_holes = None self.height = height self.width = width m = mask_or_polygons if isinstance(m, dict): # RLEs assert "counts" in m and "size" in m if isinstance(m["counts"], list): # uncompressed RLEs h, w = m["size"] assert h == height and w == width m = mask_util.frPyObjects(m, h, w) self._mask = mask_util.decode(m)[:, :] return if isinstance(m, list): # list[ndarray] self._polygons = [np.asarray(x).reshape(-1) for x in m] return if isinstance(m, np.ndarray): # assumed to be a binary mask assert m.shape[1] != 2, m.shape assert m.shape == ( height, width, ), f"mask shape: {m.shape}, target dims: {height}, {width}" self._mask = m.astype("uint8") return raise ValueError("GenericMask cannot handle object {} of type '{}'".format(m, type(m))) @property def mask(self): if self._mask is None: self._mask = self.polygons_to_mask(self._polygons) return self._mask @property def polygons(self): if self._polygons is None: self._polygons, self._has_holes = self.mask_to_polygons(self._mask) return self._polygons @property def has_holes(self): if self._has_holes is None: if self._mask is not None: self._polygons, self._has_holes = self.mask_to_polygons(self._mask) else: self._has_holes = False # if original format is polygon, does not have holes return self._has_holes def mask_to_polygons(self, mask): # cv2.RETR_CCOMP flag retrieves all the contours and arranges them to a 2-level # hierarchy. External contours (boundary) of the object are placed in hierarchy-1. # Internal contours (holes) are placed in hierarchy-2. # cv2.CHAIN_APPROX_NONE flag gets vertices of polygons from contours. mask = np.ascontiguousarray(mask) # some versions of cv2 does not support incontiguous arr res = cv2.findContours(mask.astype("uint8"), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) hierarchy = res[-1] if hierarchy is None: # empty mask return [], False has_holes = (hierarchy.reshape(-1, 4)[:, 3] >= 0).sum() > 0 res = res[-2] res = [x.flatten() for x in res] # These coordinates from OpenCV are integers in range [0, W-1 or H-1]. # We add 0.5 to turn them into real-value coordinate space. A better solution # would be to first +0.5 and then dilate the returned polygon by 0.5. 
res = [x + 0.5 for x in res if len(x) >= 6] return res, has_holes def polygons_to_mask(self, polygons): rle = mask_util.frPyObjects(polygons, self.height, self.width) rle = mask_util.merge(rle) return mask_util.decode(rle)[:, :] def area(self): return self.mask.sum() def bbox(self): p = mask_util.frPyObjects(self.polygons, self.height, self.width) p = mask_util.merge(p) bbox = mask_util.toBbox(p) bbox[2] += bbox[0] bbox[3] += bbox[1] return bbox class _PanopticPrediction: """ Unify different panoptic annotation/prediction formats """ def __init__(self, panoptic_seg, segments_info, metadata=None): if segments_info is None: assert metadata is not None # If "segments_info" is None, we assume "panoptic_img" is a # H*W int32 image storing the panoptic_id in the format of # category_id * label_divisor + instance_id. We reserve -1 for # VOID label. label_divisor = metadata.label_divisor segments_info = [] for panoptic_label in np.unique(panoptic_seg.numpy()): if panoptic_label == -1: # VOID region. continue pred_class = panoptic_label // label_divisor isthing = pred_class in metadata.thing_dataset_id_to_contiguous_id.values() segments_info.append( { "id": int(panoptic_label), "category_id": int(pred_class), "isthing": bool(isthing), } ) del metadata self._seg = panoptic_seg self._sinfo = {s["id"]: s for s in segments_info} # seg id -> seg info segment_ids, areas = torch.unique(panoptic_seg, sorted=True, return_counts=True) areas = areas.numpy() sorted_idxs = np.argsort(-areas) self._seg_ids, self._seg_areas = segment_ids[sorted_idxs], areas[sorted_idxs] self._seg_ids = self._seg_ids.tolist() for sid, area in zip(self._seg_ids, self._seg_areas): if sid in self._sinfo: self._sinfo[sid]["area"] = float(area) def non_empty_mask(self): """ Returns: (H, W) array, a mask for all pixels that have a prediction """ empty_ids = [] for id in self._seg_ids: if id not in self._sinfo: empty_ids.append(id) if len(empty_ids) == 0: return np.zeros(self._seg.shape, dtype=np.uint8) assert ( len(empty_ids) == 1 ), ">1 ids corresponds to no labels. This is currently not supported" return (self._seg != empty_ids[0]).numpy().astype(np.bool) def semantic_masks(self): for sid in self._seg_ids: sinfo = self._sinfo.get(sid) if sinfo is None or sinfo["isthing"]: # Some pixels (e.g. id 0 in PanopticFPN) have no instance or semantic predictions. continue yield (self._seg == sid).numpy().astype(np.bool), sinfo def instance_masks(self): for sid in self._seg_ids: sinfo = self._sinfo.get(sid) if sinfo is None or not sinfo["isthing"]: continue mask = (self._seg == sid).numpy().astype(np.bool) if mask.sum() > 0: yield mask, sinfo def _create_text_labels(classes, scores, class_names, is_crowd=None): """ Args: classes (list[int] or None): scores (list[float] or None): class_names (list[str] or None): is_crowd (list[bool] or None): Returns: list[str] or None """ labels = None if classes is not None: if class_names is not None and len(class_names) > 0: labels = [class_names[i] for i in classes] else: labels = [str(i) for i in classes] if scores is not None: if labels is None: labels = ["{:.0f}%".format(s * 100) for s in scores] else: labels = ["{} {:.0f}%".format(l, s * 100) for l, s in zip(labels, scores)] if labels is not None and is_crowd is not None: labels = [l + ("|crowd" if crowd else "") for l, crowd in zip(labels, is_crowd)] return labels class VisImage: def __init__(self, img, scale=1.0): """ Args: img (ndarray): an RGB image of shape (H, W, 3) in range [0, 255]. 
scale (float): scale the input image """ self.img = img self.scale = scale self.width, self.height = img.shape[1], img.shape[0] self._setup_figure(img) def _setup_figure(self, img): """ Args: Same as in :meth:`__init__()`. Returns: fig (matplotlib.pyplot.figure): top level container for all the image plot elements. ax (matplotlib.pyplot.Axes): contains figure elements and sets the coordinate system. """ fig = mplfigure.Figure(frameon=False) self.dpi = fig.get_dpi() # add a small 1e-2 to avoid precision lost due to matplotlib's truncation # (https://github.com/matplotlib/matplotlib/issues/15363) fig.set_size_inches( (self.width * self.scale + 1e-2) / self.dpi, (self.height * self.scale + 1e-2) / self.dpi, ) self.canvas = FigureCanvasAgg(fig) # self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig) ax = fig.add_axes([0.0, 0.0, 1.0, 1.0]) ax.axis("off") self.fig = fig self.ax = ax self.reset_image(img) def reset_image(self, img): """ Args: img: same as in __init__ """ img = img.astype("uint8") self.ax.imshow(img, extent=(0, self.width, self.height, 0), interpolation="nearest") def save(self, filepath): """ Args: filepath (str): a string that contains the absolute path, including the file name, where the visualized image will be saved. """ self.fig.savefig(filepath) def get_image(self): """ Returns: ndarray: the visualized image of shape (H, W, 3) (RGB) in uint8 type. The shape is scaled w.r.t the input image using the given `scale` argument. """ canvas = self.canvas s, (width, height) = canvas.print_to_buffer() # buf = io.BytesIO() # works for cairo backend # canvas.print_rgba(buf) # width, height = self.width, self.height # s = buf.getvalue() buffer = np.frombuffer(s, dtype="uint8") img_rgba = buffer.reshape(height, width, 4) rgb, alpha = np.split(img_rgba, [3], axis=2) return rgb.astype("uint8") class Visualizer: """ Visualizer that draws data about detection/segmentation on images. It contains methods like `draw_{text,box,circle,line,binary_mask,polygon}` that draw primitive objects to images, as well as high-level wrappers like `draw_{instance_predictions,sem_seg,panoptic_seg_predictions,dataset_dict}` that draw composite data in some pre-defined style. Note that the exact visualization style for the high-level wrappers are subject to change. Style such as color, opacity, label contents, visibility of labels, or even the visibility of objects themselves (e.g. when the object is too small) may change according to different heuristics, as long as the results still look visually reasonable. To obtain a consistent style, you can implement custom drawing functions with the abovementioned primitive methods instead. If you need more customized visualization styles, you can process the data yourself following their format documented in tutorials (:doc:`/tutorials/models`, :doc:`/tutorials/datasets`). This class does not intend to satisfy everyone's preference on drawing styles. This visualizer focuses on high rendering quality rather than performance. It is not designed to be used for real-time applications. """ # TODO implement a fast, rasterized version using OpenCV def __init__(self, img_rgb, metadata=None, scale=1.0, instance_mode=ColorMode.IMAGE): """ Args: img_rgb: a numpy array of shape (H, W, C), where H and W correspond to the height and width of the image respectively. C is the number of color channels. The image is required to be in RGB format since that is a requirement of the Matplotlib library. The image is also expected to be in the range [0, 255]. 
metadata (Metadata): dataset metadata (e.g. class names and colors) instance_mode (ColorMode): defines one of the pre-defined style for drawing instances on an image. """ self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8) if metadata is None: metadata = MetadataCatalog.get("__nonexist__") self.metadata = metadata self.output = VisImage(self.img, scale=scale) self.cpu_device = torch.device("cpu") # too small texts are useless, therefore clamp to 9 self._default_font_size = max( np.sqrt(self.output.height * self.output.width) // 90, 10 // scale ) self._instance_mode = instance_mode self.keypoint_threshold = _KEYPOINT_THRESHOLD def draw_instance_predictions(self, predictions): """ Draw instance-level prediction results on an image. Args: predictions (Instances): the output of an instance detection/segmentation model. Following fields will be used to draw: "pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle"). Returns: output (VisImage): image object with visualizations. """ boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None scores = predictions.scores if predictions.has("scores") else None classes = predictions.pred_classes.tolist() if predictions.has("pred_classes") else None labels = _create_text_labels(classes, scores, self.metadata.get("thing_classes", None)) keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None if predictions.has("pred_masks"): masks = np.asarray(predictions.pred_masks) masks = [GenericMask(x, self.output.height, self.output.width) for x in masks] else: masks = None if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"): colors = [ self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes ] alpha = 0.8 else: colors = None alpha = 0.5 if self._instance_mode == ColorMode.IMAGE_BW: self.output.reset_image( self._create_grayscale_image( (predictions.pred_masks.any(dim=0) > 0).numpy() if predictions.has("pred_masks") else None ) ) alpha = 0.3 self.overlay_instances( masks=masks, boxes=boxes, labels=labels, keypoints=keypoints, assigned_colors=colors, alpha=alpha, ) return self.output def draw_sem_seg(self, sem_seg, area_threshold=None, alpha=0.8): """ Draw semantic segmentation predictions/labels. Args: sem_seg (Tensor or ndarray): the segmentation of shape (H, W). Each value is the integer label of the pixel. area_threshold (int): segments with less than `area_threshold` are not drawn. alpha (float): the larger it is, the more opaque the segmentations are. Returns: output (VisImage): image object with visualizations. """ if isinstance(sem_seg, torch.Tensor): sem_seg = sem_seg.numpy() labels, areas = np.unique(sem_seg, return_counts=True) sorted_idxs = np.argsort(-areas).tolist() labels = labels[sorted_idxs] for label in filter(lambda l: l < len(self.metadata.stuff_classes), labels): try: mask_color = [x / 255 for x in self.metadata.stuff_colors[label]] except (AttributeError, IndexError): mask_color = None binary_mask = (sem_seg == label).astype(np.uint8) text = self.metadata.stuff_classes[label] self.draw_binary_mask( binary_mask, color=mask_color, edge_color=_OFF_WHITE, text=text, alpha=alpha, area_threshold=area_threshold, ) return self.output def draw_panoptic_seg(self, panoptic_seg, segments_info, area_threshold=None, alpha=0.7): """ Draw panoptic prediction annotations or results. Args: panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment. 
segments_info (list[dict] or None): Describe each segment in `panoptic_seg`. If it is a ``list[dict]``, each dict contains keys "id", "category_id". If None, category id of each pixel is computed by ``pixel // metadata.label_divisor``. area_threshold (int): stuff segments with less than `area_threshold` are not drawn. Returns: output (VisImage): image object with visualizations. """ pred = _PanopticPrediction(panoptic_seg, segments_info, self.metadata) if self._instance_mode == ColorMode.IMAGE_BW: self.output.reset_image(self._create_grayscale_image(pred.non_empty_mask())) # draw mask for all semantic segments first i.e. "stuff" for mask, sinfo in pred.semantic_masks(): category_idx = sinfo["category_id"] try: mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]] except AttributeError: mask_color = None text = self.metadata.stuff_classes[category_idx] self.draw_binary_mask( mask, color=mask_color, edge_color=_OFF_WHITE, text=text, alpha=alpha, area_threshold=area_threshold, ) # draw mask for all instances second all_instances = list(pred.instance_masks()) if len(all_instances) == 0: return self.output masks, sinfo = list(zip(*all_instances)) category_ids = [x["category_id"] for x in sinfo] try: scores = [x["score"] for x in sinfo] except KeyError: scores = None labels = _create_text_labels( category_ids, scores, self.metadata.thing_classes, [x.get("iscrowd", 0) for x in sinfo] ) try: colors = [ self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in category_ids ] except AttributeError: colors = None self.overlay_instances(masks=masks, labels=labels, assigned_colors=colors, alpha=alpha) return self.output draw_panoptic_seg_predictions = draw_panoptic_seg # backward compatibility def draw_dataset_dict(self, dic): """ Draw annotations/segmentaions in Detectron2 Dataset format. Args: dic (dict): annotation/segmentation data of one image, in Detectron2 Dataset format. Returns: output (VisImage): image object with visualizations. 
""" annos = dic.get("annotations", None) if annos: if "segmentation" in annos[0]: masks = [x["segmentation"] for x in annos] else: masks = None if "keypoints" in annos[0]: keypts = [x["keypoints"] for x in annos] keypts = np.array(keypts).reshape(len(annos), -1, 3) else: keypts = None boxes = [ BoxMode.convert(x["bbox"], x["bbox_mode"], BoxMode.XYXY_ABS) if len(x["bbox"]) == 4 else x["bbox"] for x in annos ] colors = None category_ids = [x["category_id"] for x in annos] if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"): colors = [ self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in category_ids ] names = self.metadata.get("thing_classes", None) labels = _create_text_labels( category_ids, scores=None, class_names=names, is_crowd=[x.get("iscrowd", 0) for x in annos], ) self.overlay_instances( labels=labels, boxes=boxes, masks=masks, keypoints=keypts, assigned_colors=colors ) sem_seg = dic.get("sem_seg", None) if sem_seg is None and "sem_seg_file_name" in dic: with PathManager.open(dic["sem_seg_file_name"], "rb") as f: sem_seg = Image.open(f) sem_seg = np.asarray(sem_seg, dtype="uint8") if sem_seg is not None: self.draw_sem_seg(sem_seg, area_threshold=0, alpha=0.5) pan_seg = dic.get("pan_seg", None) if pan_seg is None and "pan_seg_file_name" in dic: with PathManager.open(dic["pan_seg_file_name"], "rb") as f: pan_seg = Image.open(f) pan_seg = np.asarray(pan_seg) pan_seg = rgb2id(pan_seg) if pan_seg is not None: segments_info = dic["segments_info"] pan_seg = torch.tensor(pan_seg) self.draw_panoptic_seg(pan_seg, segments_info, area_threshold=0, alpha=0.5) return self.output def overlay_instances( self, *, boxes=None, labels=None, masks=None, keypoints=None, assigned_colors=None, alpha=0.5, ): """ Args: boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`, or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image, or a :class:`RotatedBoxes`, or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format for the N objects in a single image, labels (list[str]): the text to be displayed for each instance. masks (masks-like object): Supported types are: * :class:`detectron2.structures.PolygonMasks`, :class:`detectron2.structures.BitMasks`. * list[list[ndarray]]: contains the segmentation masks for all objects in one image. The first level of the list corresponds to individual instances. The second level to all the polygon that compose the instance, and the third level to the polygon coordinates. The third level should have the format of [x0, y0, x1, y1, ..., xn, yn] (n >= 3). * list[ndarray]: each ndarray is a binary mask of shape (H, W). * list[dict]: each dict is a COCO-style RLE. keypoints (Keypoint or array like): an array-like object of shape (N, K, 3), where the N is the number of instances and K is the number of keypoints. The last dimension corresponds to (x, y, visibility or score). assigned_colors (list[matplotlib.colors]): a list of colors, where each color corresponds to each mask or box in the image. Refer to 'matplotlib.colors' for full list of formats that the colors are accepted in. Returns: output (VisImage): image object with visualizations. 
""" num_instances = 0 if boxes is not None: boxes = self._convert_boxes(boxes) num_instances = len(boxes) if masks is not None: masks = self._convert_masks(masks) if num_instances: assert len(masks) == num_instances else: num_instances = len(masks) if keypoints is not None: if num_instances: assert len(keypoints) == num_instances else: num_instances = len(keypoints) keypoints = self._convert_keypoints(keypoints) if labels is not None: assert len(labels) == num_instances if assigned_colors is None:
assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]
8
2023-12-22 13:31:33+00:00
24k
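`GenericMask` above accepts three mask encodings (COCO RLE dicts, polygon lists, binary arrays) and converts between them lazily through pycocotools, as `polygons_to_mask` and `bbox` do. Below is a minimal round trip, assuming a single hypothetical rectangular polygon:

```python
import pycocotools.mask as mask_util

h, w = 64, 64
# One instance given as a single polygon [x0, y0, x1, y1, ...]
polygons = [[10.0, 10.0, 50.0, 10.0, 50.0, 50.0, 10.0, 50.0]]

rles = mask_util.frPyObjects(polygons, h, w)  # polygons -> per-polygon RLEs
rle = mask_util.merge(rles)                   # union, as in polygons_to_mask
mask = mask_util.decode(rle)                  # (H, W) uint8 binary mask

x, y, bw, bh = mask_util.toBbox(rle)          # XYWH box around the mask
print(int(mask.sum()), (x, y, x + bw, y + bh))  # mask area, plus the box as XYXY
```

Note the coordinate convention: `toBbox` returns XYWH, which is why `GenericMask.bbox` adds the origin to the width and height before returning an XYXY box.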
xhuangcv/humannorm
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Flo...
from dataclasses import dataclass, field from threestudio.models.geometry.base import ( BaseExplicitGeometry, BaseGeometry, contract_to_unisphere, ) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.ops import scale_tensor from threestudio.utils.typing import * from pysdf import SDF from tqdm import tqdm import os import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import threestudio import trimesh
16,107
def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) # Initialize SDF to a given shape when no weights are provided or force_shape_init is True optim = torch.optim.Adam(self.parameters(), lr=1e-3) for _ in tqdm( range(1000), desc=f"Initializing SDF to a(n) {self.cfg.shape_init}:", disable=get_rank() != 0, ): points_rand = ( torch.rand((10000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0 ) sdf_gt = get_gt_sdf(points_rand) sdf_pred = self.forward_sdf(points_rand) loss = F.mse_loss(sdf_pred, sdf_gt) optim.zero_grad() loss.backward() optim.step() # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance elif isinstance(other, ImplicitVolume): instance = TetrahedraSDFGrid(cfg, **kwargs) if other.cfg.isosurface_method != "mt": other.cfg.isosurface_method = "mt" threestudio.warn( f"Override isosurface_method of the source geometry to 'mt'" ) if other.cfg.isosurface_resolution != instance.cfg.isosurface_resolution: other.cfg.isosurface_resolution = instance.cfg.isosurface_resolution threestudio.warn( f"Override isosurface_resolution of the source geometry to {instance.cfg.isosurface_resolution}" ) mesh, _ = other.isosurface() instance.isosurface_bbox = mesh.extras["bbox"] instance.sdf.data = ( 
mesh.extras["grid_level"].to(instance.sdf.data).clamp(-1, 1) ) if not instance.cfg.geometry_only and copy_net: instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: if self.cfg.shape_init is None and not self.cfg.force_shape_init: return # do not initialize shape if weights are provided if self.cfg.weights is not None and not self.cfg.force_shape_init: return if self.cfg.sdf_bias != 0.0: threestudio.warn( "shape_init and sdf_bias are both specified, which may lead to unexpected results." 
) get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]] assert isinstance(self.cfg.shape_init, str) if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise ValueError(f"Mesh file {mesh_path} does not exist.") scene = trimesh.load(mesh_path) if isinstance(scene, trimesh.Trimesh): mesh = scene elif isinstance(scene, trimesh.scene.Scene): mesh = trimesh.Trimesh() for obj in scene.geometry.values(): mesh = trimesh.util.concatenate([mesh, obj]) else: raise ValueError(f"Unknown mesh type at {mesh_path}.") # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." 
) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) # Initialize SDF to a given shape when no weights are provided or force_shape_init is True optim = torch.optim.Adam(self.parameters(), lr=1e-3) for _ in tqdm( range(1000), desc=f"Initializing SDF to a(n) {self.cfg.shape_init}:", disable=get_rank() != 0, ): points_rand = ( torch.rand((10000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0 ) sdf_gt = get_gt_sdf(points_rand) sdf_pred = self.forward_sdf(points_rand) loss = F.mse_loss(sdf_pred, sdf_gt) optim.zero_grad() loss.backward() optim.step() # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance elif isinstance(other, ImplicitVolume): instance = TetrahedraSDFGrid(cfg, **kwargs) if other.cfg.isosurface_method != "mt": other.cfg.isosurface_method = "mt" threestudio.warn( f"Override isosurface_method of the source geometry to 'mt'" ) if 
other.cfg.isosurface_resolution != instance.cfg.isosurface_resolution: other.cfg.isosurface_resolution = instance.cfg.isosurface_resolution threestudio.warn( f"Override isosurface_resolution of the source geometry to {instance.cfg.isosurface_resolution}" ) mesh, _ = other.isosurface() instance.isosurface_bbox = mesh.extras["bbox"] instance.sdf.data = ( mesh.extras["grid_level"].to(instance.sdf.data).clamp(-1, 1) ) if not instance.cfg.geometry_only and copy_net: instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
elif isinstance(other, ImplicitSDF):
3
2023-12-23 12:37:48+00:00
24k
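`initialize_shape` above fits the learnable SDF to an analytic target by regressing randomly sampled points with Adam and an MSE loss, negating pysdf's output because pysdf reports positive distance inside the mesh. Below is a stripped-down sketch of that loop, assuming a toy per-vertex SDF tensor in place of the full geometry module and its predicted SDF:

```python
import torch
import torch.nn.functional as F

vertices = torch.rand(4096, 3) * 2.0 - 1.0      # stand-in grid vertices in [-1, 1]^3
sdf = torch.nn.Parameter(torch.zeros(4096, 1))  # learnable per-vertex SDF values

def gt_sdf(points, radius=0.5):                 # analytic sphere, like shape_init="sphere"
    return points.norm(dim=-1, keepdim=True) - radius

optim = torch.optim.Adam([sdf], lr=1e-3)
for _ in range(1000):
    loss = F.mse_loss(sdf, gt_sdf(vertices))
    optim.zero_grad()
    loss.backward()
    optim.step()
print(loss.item())  # shrinks toward 0 as the grid matches the sphere
```

The real loop differs in that it draws fresh random points every step and queries the model's predicted SDF at those points; after fitting, the parameters are broadcast from rank 0 so all replicas start from the same shape.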
dakpinaroglu/Frame2seq
frame2seq/openfold/model/structure_module.py
[ { "identifier": "Linear", "path": "frame2seq/openfold/model/primitives.py", "snippet": "class Linear(nn.Linear):\n \"\"\"\n A Linear layer with built-in nonstandard initializations. Called just\n like torch.nn.Linear.\n\n Implements the initializers in 1.11.4, plus some additional ones found...
from functools import reduce from operator import mul from typing import Optional, Tuple, Sequence from frame2seq.openfold.model.primitives import Linear, LayerNorm, ipa_point_weights_init_ from frame2seq.openfold.np.residue_constants import ( restype_rigid_group_default_frame, restype_atom14_to_rigid_group, restype_atom14_mask, restype_atom14_rigid_group_positions, ) from frame2seq.openfold.utils.feats import ( frames_and_literature_positions_to_atom14_pos, torsion_angles_to_frames, ) from frame2seq.openfold.utils.precision_utils import is_fp16_enabled from frame2seq.openfold.utils.rigid_utils import Rotation, Rigid from frame2seq.openfold.utils.tensor_utils import ( dict_multimap, permute_final_dims, flatten_final_dims, ) import importlib import math import sys import torch import torch.nn as nn
14,779
# [*, N_res, H * C_hidden] o = flatten_final_dims(o, 2) # [*, H, 3, N_res, P_v] if(inplace_safe): v_pts = permute_final_dims(v_pts, (1, 3, 0, 2)) o_pt = [ torch.matmul(a, v.to(a.dtype)) for v in torch.unbind(v_pts, dim=-3) ] o_pt = torch.stack(o_pt, dim=-3) else: o_pt = torch.sum( ( a[..., None, :, :, None] * permute_final_dims(v_pts, (1, 3, 0, 2))[..., None, :, :] ), dim=-2, ) # [*, N_res, H, P_v, 3] o_pt = permute_final_dims(o_pt, (2, 0, 3, 1)) o_pt = r[..., None, None].invert_apply(o_pt) # [*, N_res, H * P_v] o_pt_norm = flatten_final_dims( torch.sqrt(torch.sum(o_pt ** 2, dim=-1) + self.eps), 2 ) # [*, N_res, H * P_v, 3] o_pt = o_pt.reshape(*o_pt.shape[:-3], -1, 3) if(_offload_inference): z[0] = z[0].to(o_pt.device) # [*, N_res, H, C_z] o_pair = torch.matmul(a.transpose(-2, -3), z[0].to(dtype=a.dtype)) # [*, N_res, H * C_z] o_pair = flatten_final_dims(o_pair, 2) # [*, N_res, C_s] s = self.linear_out( torch.cat( (o, *torch.unbind(o_pt, dim=-1), o_pt_norm, o_pair), dim=-1 ).to(dtype=z[0].dtype) ) return s class BackboneUpdate(nn.Module): """ Implements part of Algorithm 23. """ def __init__(self, c_s): """ Args: c_s: Single representation channel dimension """ super(BackboneUpdate, self).__init__() self.c_s = c_s self.linear = Linear(self.c_s, 6, init="final") def forward(self, s: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: """ Args: [*, N_res, C_s] single representation Returns: [*, N_res, 6] update vector """ # [*, 6] update = self.linear(s) return update class StructureModuleTransitionLayer(nn.Module): def __init__(self, c): super(StructureModuleTransitionLayer, self).__init__() self.c = c self.linear_1 = Linear(self.c, self.c, init="relu") self.linear_2 = Linear(self.c, self.c, init="relu") self.linear_3 = Linear(self.c, self.c, init="final") self.relu = nn.ReLU() def forward(self, s): s_initial = s s = self.linear_1(s) s = self.relu(s) s = self.linear_2(s) s = self.relu(s) s = self.linear_3(s) s = s + s_initial return s class StructureModuleTransition(nn.Module): def __init__(self, c, num_layers, dropout_rate): super(StructureModuleTransition, self).__init__() self.c = c self.num_layers = num_layers self.dropout_rate = dropout_rate self.layers = nn.ModuleList() for _ in range(self.num_layers): l = StructureModuleTransitionLayer(self.c) self.layers.append(l) self.dropout = nn.Dropout(self.dropout_rate)
# Copyright 2021 AlQuraishi Laboratory # Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. attn_core_inplace_cuda = False class AngleResnetBlock(nn.Module): def __init__(self, c_hidden): """ Args: c_hidden: Hidden channel dimension """ super(AngleResnetBlock, self).__init__() self.c_hidden = c_hidden self.linear_1 = Linear(self.c_hidden, self.c_hidden, init="relu") self.linear_2 = Linear(self.c_hidden, self.c_hidden, init="final") self.relu = nn.ReLU() def forward(self, a: torch.Tensor) -> torch.Tensor: s_initial = a a = self.relu(a) a = self.linear_1(a) a = self.relu(a) a = self.linear_2(a) return a + s_initial class AngleResnet(nn.Module): """ Implements Algorithm 20, lines 11-14 """ def __init__(self, c_in, c_hidden, no_blocks, no_angles, epsilon): """ Args: c_in: Input channel dimension c_hidden: Hidden channel dimension no_blocks: Number of resnet blocks no_angles: Number of torsion angles to generate epsilon: Small constant for normalization """ super(AngleResnet, self).__init__() self.c_in = c_in self.c_hidden = c_hidden self.no_blocks = no_blocks self.no_angles = no_angles self.eps = epsilon self.linear_in = Linear(self.c_in, self.c_hidden) self.linear_initial = Linear(self.c_in, self.c_hidden) self.layers = nn.ModuleList() for _ in range(self.no_blocks): layer = AngleResnetBlock(c_hidden=self.c_hidden) self.layers.append(layer) self.linear_out = Linear(self.c_hidden, self.no_angles * 2) self.relu = nn.ReLU() def forward( self, s: torch.Tensor, s_initial: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor]: """ Args: s: [*, C_hidden] single embedding s_initial: [*, C_hidden] single embedding as of the start of the StructureModule Returns: [*, no_angles, 2] predicted angles """ # NOTE: The ReLU's applied to the inputs are absent from the supplement # pseudocode but present in the source. For maximal compatibility with # the pretrained weights, I'm going with the source. # [*, C_hidden] s_initial = self.relu(s_initial) s_initial = self.linear_initial(s_initial) s = self.relu(s) s = self.linear_in(s) s = s + s_initial for l in self.layers: s = l(s) s = self.relu(s) # [*, no_angles * 2] s = self.linear_out(s) # [*, no_angles, 2] s = s.view(s.shape[:-1] + (-1, 2)) unnormalized_s = s norm_denom = torch.sqrt( torch.clamp( torch.sum(s ** 2, dim=-1, keepdim=True), min=self.eps, ) ) s = s / norm_denom return unnormalized_s, s class InvariantPointAttention(nn.Module): """ Implements Algorithm 22. 
""" def __init__( self, c_s: int, c_z: int, c_hidden: int, no_heads: int, no_qk_points: int, no_v_points: int, inf: float = 1e5, eps: float = 1e-8, ): """ Args: c_s: Single representation channel dimension c_z: Pair representation channel dimension c_hidden: Hidden channel dimension no_heads: Number of attention heads no_qk_points: Number of query/key points to generate no_v_points: Number of value points to generate """ super(InvariantPointAttention, self).__init__() self.c_s = c_s self.c_z = c_z self.c_hidden = c_hidden self.no_heads = no_heads self.no_qk_points = no_qk_points self.no_v_points = no_v_points self.inf = inf self.eps = eps # These linear layers differ from their specifications in the # supplement. There, they lack bias and use Glorot initialization. # Here as in the official source, they have bias and use the default # Lecun initialization. hc = self.c_hidden * self.no_heads self.linear_q = Linear(self.c_s, hc) self.linear_kv = Linear(self.c_s, 2 * hc) hpq = self.no_heads * self.no_qk_points * 3 self.linear_q_points = Linear(self.c_s, hpq) hpkv = self.no_heads * (self.no_qk_points + self.no_v_points) * 3 self.linear_kv_points = Linear(self.c_s, hpkv) hpv = self.no_heads * self.no_v_points * 3 self.linear_b = Linear(self.c_z, self.no_heads) self.head_weights = nn.Parameter(torch.zeros((no_heads))) ipa_point_weights_init_(self.head_weights) concat_out_dim = self.no_heads * ( self.c_z + self.c_hidden + self.no_v_points * 4 ) self.linear_out = Linear(concat_out_dim, self.c_s, init="final") self.softmax = nn.Softmax(dim=-1) self.softplus = nn.Softplus() def forward( self, s: torch.Tensor, z: Optional[torch.Tensor], r: Rigid, mask: torch.Tensor, inplace_safe: bool = False, _offload_inference: bool = False, _z_reference_list: Optional[Sequence[torch.Tensor]] = None, attn_drop_rate = 0.0, ) -> torch.Tensor: """ Args: s: [*, N_res, C_s] single representation z: [*, N_res, N_res, C_z] pair representation r: [*, N_res] transformation object mask: [*, N_res] mask Returns: [*, N_res, C_s] single representation update """ if(_offload_inference and inplace_safe): z = _z_reference_list else: z = [z] ####################################### # Generate scalar and point activations ####################################### # [*, N_res, H * C_hidden] q = self.linear_q(s) kv = self.linear_kv(s) # [*, N_res, H, C_hidden] q = q.view(q.shape[:-1] + (self.no_heads, -1)) # [*, N_res, H, 2 * C_hidden] kv = kv.view(kv.shape[:-1] + (self.no_heads, -1)) # [*, N_res, H, C_hidden] k, v = torch.split(kv, self.c_hidden, dim=-1) # [*, N_res, H * P_q * 3] q_pts = self.linear_q_points(s) # This is kind of clunky, but it's how the original does it # [*, N_res, H * P_q, 3] q_pts = torch.split(q_pts, q_pts.shape[-1] // 3, dim=-1) q_pts = torch.stack(q_pts, dim=-1) q_pts = r[..., None].apply(q_pts) # [*, N_res, H, P_q, 3] q_pts = q_pts.view( q_pts.shape[:-2] + (self.no_heads, self.no_qk_points, 3) ) # [*, N_res, H * (P_q + P_v) * 3] kv_pts = self.linear_kv_points(s) # [*, N_res, H * (P_q + P_v), 3] kv_pts = torch.split(kv_pts, kv_pts.shape[-1] // 3, dim=-1) kv_pts = torch.stack(kv_pts, dim=-1) kv_pts = r[..., None].apply(kv_pts) # [*, N_res, H, (P_q + P_v), 3] kv_pts = kv_pts.view(kv_pts.shape[:-2] + (self.no_heads, -1, 3)) # [*, N_res, H, P_q/P_v, 3] k_pts, v_pts = torch.split( kv_pts, [self.no_qk_points, self.no_v_points], dim=-2 ) ########################## # Compute attention scores ########################## # [*, N_res, N_res, H] b = self.linear_b(z[0]) if(_offload_inference): assert(sys.getrefcount(z[0]) == 2) 
z[0] = z[0].cpu() # [*, H, N_res, N_res] if(is_fp16_enabled()): with torch.cuda.amp.autocast(enabled=False): a = torch.matmul( permute_final_dims(q.float(), (1, 0, 2)), # [*, H, N_res, C_hidden] permute_final_dims(k.float(), (1, 2, 0)), # [*, H, C_hidden, N_res] ) else: a = torch.matmul( permute_final_dims(q, (1, 0, 2)), # [*, H, N_res, C_hidden] permute_final_dims(k, (1, 2, 0)), # [*, H, C_hidden, N_res] ) a *= math.sqrt(1.0 / (3 * self.c_hidden)) a += (math.sqrt(1.0 / 3) * permute_final_dims(b, (2, 0, 1))) # [*, N_res, N_res, H, P_q, 3] pt_att = q_pts.unsqueeze(-4) - k_pts.unsqueeze(-5) if(inplace_safe): pt_att *= pt_att else: pt_att = pt_att ** 2 # [*, N_res, N_res, H, P_q] pt_att = sum(torch.unbind(pt_att, dim=-1)) head_weights = self.softplus(self.head_weights).view( *((1,) * len(pt_att.shape[:-2]) + (-1, 1)) ) head_weights = head_weights * math.sqrt( 1.0 / (3 * (self.no_qk_points * 9.0 / 2)) ) if(inplace_safe): pt_att *= head_weights else: pt_att = pt_att * head_weights # [*, N_res, N_res, H] pt_att = torch.sum(pt_att, dim=-1) * (-0.5) # [*, N_res, N_res] square_mask = mask.unsqueeze(-1) * mask.unsqueeze(-2) square_mask = self.inf * (square_mask - 1) """ Frame2seq implementation of IPA regularization via attention dropout """ if attn_drop_rate > 0.0: random_square_mask = torch.rand(square_mask.shape, device=square_mask.device) random_square_mask = self.inf * -1 * (random_square_mask < attn_drop_rate) square_mask += random_square_mask # [*, H, N_res, N_res] pt_att = permute_final_dims(pt_att, (2, 0, 1)) if(inplace_safe): a += pt_att del pt_att a += square_mask.unsqueeze(-3) # in-place softmax attn_core_inplace_cuda.forward_( a, reduce(mul, a.shape[:-1]), a.shape[-1], ) else: a = a + pt_att a = a + square_mask.unsqueeze(-3) a = self.softmax(a) ################ # Compute output ################ # [*, N_res, H, C_hidden] o = torch.matmul( a, v.transpose(-2, -3).to(dtype=a.dtype) ).transpose(-2, -3) # [*, N_res, H * C_hidden] o = flatten_final_dims(o, 2) # [*, H, 3, N_res, P_v] if(inplace_safe): v_pts = permute_final_dims(v_pts, (1, 3, 0, 2)) o_pt = [ torch.matmul(a, v.to(a.dtype)) for v in torch.unbind(v_pts, dim=-3) ] o_pt = torch.stack(o_pt, dim=-3) else: o_pt = torch.sum( ( a[..., None, :, :, None] * permute_final_dims(v_pts, (1, 3, 0, 2))[..., None, :, :] ), dim=-2, ) # [*, N_res, H, P_v, 3] o_pt = permute_final_dims(o_pt, (2, 0, 3, 1)) o_pt = r[..., None, None].invert_apply(o_pt) # [*, N_res, H * P_v] o_pt_norm = flatten_final_dims( torch.sqrt(torch.sum(o_pt ** 2, dim=-1) + self.eps), 2 ) # [*, N_res, H * P_v, 3] o_pt = o_pt.reshape(*o_pt.shape[:-3], -1, 3) if(_offload_inference): z[0] = z[0].to(o_pt.device) # [*, N_res, H, C_z] o_pair = torch.matmul(a.transpose(-2, -3), z[0].to(dtype=a.dtype)) # [*, N_res, H * C_z] o_pair = flatten_final_dims(o_pair, 2) # [*, N_res, C_s] s = self.linear_out( torch.cat( (o, *torch.unbind(o_pt, dim=-1), o_pt_norm, o_pair), dim=-1 ).to(dtype=z[0].dtype) ) return s class BackboneUpdate(nn.Module): """ Implements part of Algorithm 23. 
""" def __init__(self, c_s): """ Args: c_s: Single representation channel dimension """ super(BackboneUpdate, self).__init__() self.c_s = c_s self.linear = Linear(self.c_s, 6, init="final") def forward(self, s: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: """ Args: [*, N_res, C_s] single representation Returns: [*, N_res, 6] update vector """ # [*, 6] update = self.linear(s) return update class StructureModuleTransitionLayer(nn.Module): def __init__(self, c): super(StructureModuleTransitionLayer, self).__init__() self.c = c self.linear_1 = Linear(self.c, self.c, init="relu") self.linear_2 = Linear(self.c, self.c, init="relu") self.linear_3 = Linear(self.c, self.c, init="final") self.relu = nn.ReLU() def forward(self, s): s_initial = s s = self.linear_1(s) s = self.relu(s) s = self.linear_2(s) s = self.relu(s) s = self.linear_3(s) s = s + s_initial return s class StructureModuleTransition(nn.Module): def __init__(self, c, num_layers, dropout_rate): super(StructureModuleTransition, self).__init__() self.c = c self.num_layers = num_layers self.dropout_rate = dropout_rate self.layers = nn.ModuleList() for _ in range(self.num_layers): l = StructureModuleTransitionLayer(self.c) self.layers.append(l) self.dropout = nn.Dropout(self.dropout_rate)
self.layer_norm = LayerNorm(self.c)
1
2023-12-25 09:29:36+00:00
24k
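The `attn_drop_rate` branch in the record above is its distinctive part: Frame2seq regularizes IPA by pushing random entries of the additive attention mask to a large negative value before the softmax, so whole attention edges drop out during training. Below is a minimal, self-contained sketch of just that masking step; the helper name `drop_attention_edges` and the tensor shapes are illustrative, not from the source.

import torch

def drop_attention_edges(square_mask: torch.Tensor,
                         attn_drop_rate: float,
                         inf: float = 1e5) -> torch.Tensor:
    # Push randomly chosen entries of an additive attention mask to -inf,
    # so the corresponding attention edges vanish after the softmax.
    if attn_drop_rate <= 0.0:
        return square_mask
    random_square_mask = torch.rand(square_mask.shape, device=square_mask.device)
    random_square_mask = inf * -1 * (random_square_mask < attn_drop_rate)
    return square_mask + random_square_mask

mask = torch.ones(2, 8)                                # [*, N_res] residue mask
square_mask = mask.unsqueeze(-1) * mask.unsqueeze(-2)  # [*, N_res, N_res]
square_mask = 1e5 * (square_mask - 1)                  # 0 where valid, -inf where padded
print(drop_attention_edges(square_mask, attn_drop_rate=0.1).shape)  # torch.Size([2, 8, 8])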
iKala/ievals
ievals/cli/ieval.py
[ { "identifier": "TGI_Evaluator", "path": "ievals/modules/qa_evaluators/tgi.py", "snippet": "class TGI_Evaluator(Evaluator):\n def __init__(\n self,\n choices,\n k,\n ip_addr,\n model_name,\n systemMessageToken=\"<|im_start|>system\\n\",\n messageEndTok...
import os import logging import argparse import pandas as pd from datasets import load_dataset from ievals.modules.qa_evaluators.tgi import TGI_Evaluator from ievals.modules.qa_evaluators.gemini import Gemini_Evaluator from ievals.modules.qa_evaluators.claude import Claude_Evaluator from ievals.modules.qa_evaluators.azure import Azure_Evaluator from ievals.modules.qa_evaluators.oai_complete import GPT_Evaluator from ievals.modules.qa_evaluators.chatgpt import ChatGPT_Evaluator from ievals.modules.qa_evaluators.hf_chat import HF_Chat_Evaluator from ievals.modules.qa_evaluators.hf_base import ( Qwen_Evaluator, ) # we only use this for qwen base model from ievals.modules.qa_evaluators.ali_dashscope import DashScope_Evaluator from ievals.exp_executer import run_exp
19,443
""" CLI for all models Support mode: if tgi service was used you must pass in IP and hostname if the service was found in model_config.csv you could skip providing the 4 tokens (user, assistant, system, eos) else you need to pass in the four token in args """ try: except ImportError as e: logging.error("huggingface and qwen models are not supported due to " + str(e)) def get_model_config(): current_dir = os.path.dirname(os.path.abspath(__file__)) up_dir = os.path.abspath(os.path.join(current_dir, os.pardir)) df = pd.read_csv(os.path.join(up_dir, "model_config.csv")) df.fillna("", inplace=True) valid_model_names = df["model_name"].tolist() return valid_model_names, df def get_tgi_prompt_config(model_name): valid_model_names, df = get_model_config() if model_name not in valid_model_names: return None, None prompt_config = df[df["model_name"] == model_name].iloc[0] prompt_config.pop("model_name") return prompt_config def get_evaluator(model_name, series=""): if len(series): if series == "azure": return Azure_Evaluator elif series == "openai_chat": return ChatGPT_Evaluator elif series == "openai_complete":
""" CLI for all models Support mode: if tgi service was used you must pass in IP and hostname if the service was found in model_config.csv you could skip providing the 4 tokens (user, assistant, system, eos) else you need to pass in the four token in args """ try: except ImportError as e: logging.error("huggingface and qwen models are not supported due to " + str(e)) def get_model_config(): current_dir = os.path.dirname(os.path.abspath(__file__)) up_dir = os.path.abspath(os.path.join(current_dir, os.pardir)) df = pd.read_csv(os.path.join(up_dir, "model_config.csv")) df.fillna("", inplace=True) valid_model_names = df["model_name"].tolist() return valid_model_names, df def get_tgi_prompt_config(model_name): valid_model_names, df = get_model_config() if model_name not in valid_model_names: return None, None prompt_config = df[df["model_name"] == model_name].iloc[0] prompt_config.pop("model_name") return prompt_config def get_evaluator(model_name, series=""): if len(series): if series == "azure": return Azure_Evaluator elif series == "openai_chat": return ChatGPT_Evaluator elif series == "openai_complete":
return GPT_Evaluator
4
2023-12-24 08:00:38+00:00
24k
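The cropped `get_evaluator` above dispatches on the `series` string through an if/elif chain; the same routing can be expressed as a dict lookup. A minimal sketch follows — the evaluator classes are stubbed so the snippet runs standalone (in the real package they come from `ievals.modules.qa_evaluators.*`), and `get_evaluator_by_series` is an illustrative name, not part of the library.

class Azure_Evaluator: ...
class ChatGPT_Evaluator: ...
class GPT_Evaluator: ...

_SERIES_TO_EVALUATOR = {
    "azure": Azure_Evaluator,
    "openai_chat": ChatGPT_Evaluator,
    "openai_complete": GPT_Evaluator,
}

def get_evaluator_by_series(series: str):
    # Dict-based equivalent of the if/elif chain in get_evaluator.
    try:
        return _SERIES_TO_EVALUATOR[series]
    except KeyError:
        raise ValueError(f"Unknown evaluator series: {series!r}")

print(get_evaluator_by_series("openai_chat").__name__)  # ChatGPT_Evaluator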
kraina-ai/quackosm
quackosm/functions.py
[ { "identifier": "GroupedOsmTagsFilter", "path": "quackosm/_osm_tags_filters.py", "snippet": "def merge_osm_tags_filter(osm_tags_filter: OsmTagsFilter) -> OsmTagsFilter: ...\ndef merge_osm_tags_filter(osm_tags_filter: GroupedOsmTagsFilter) -> OsmTagsFilter: ...\ndef merge_osm_tags_filter(osm_tags_filter:...
from collections.abc import Iterable from pathlib import Path from typing import Any, Optional, Union from shapely.geometry.base import BaseGeometry from quackosm._osm_tags_filters import GroupedOsmTagsFilter, OsmTagsFilter from quackosm._osm_way_polygon_features import OsmWayPolygonConfig from quackosm.pbf_file_reader import PbfFileReader import geopandas as gpd
21,439
│ node/10025656392 │ {name=Direction de… │ POINT (7.4270392 43.7365262) │ │ node/10025656393 │ {name=IQOS, openin… │ POINT (7.4275175 43.7373195) │ │ node/10025656394 │ {artist_name=Anna … │ POINT (7.4293446 43.737448) │ │ · │ · │ · │ │ · │ · │ · │ │ · │ · │ · │ │ way/986864693 │ {natural=bare_rock} │ POLYGON ((7.4340482 43.745598, 7.4340263 4… │ │ way/986864694 │ {barrier=wall} │ LINESTRING (7.4327547 43.7445382, 7.432808… │ │ way/986864695 │ {natural=bare_rock} │ POLYGON ((7.4332994 43.7449315, 7.4332912 … │ │ way/986864696 │ {barrier=wall} │ LINESTRING (7.4356006 43.7464325, 7.435574… │ │ way/986864697 │ {natural=bare_rock} │ POLYGON ((7.4362767 43.74697, 7.4362983 43… │ │ way/990669427 │ {amenity=shelter, … │ POLYGON ((7.4146087 43.733883, 7.4146192 4… │ │ way/990669428 │ {highway=secondary… │ LINESTRING (7.4136598 43.7334433, 7.413640… │ │ way/990669429 │ {highway=secondary… │ LINESTRING (7.4137621 43.7334251, 7.413746… │ │ way/990848785 │ {addr:city=Monaco,… │ POLYGON ((7.4142551 43.7339622, 7.4143113 … │ │ way/993121275 │ {building=yes, nam… │ POLYGON ((7.4321416 43.7481309, 7.4321638 … │ ├──────────────────┴──────────────────────┴──────────────────────────────────────────────┤ │ 7906 rows (20 shown) 3 columns │ └────────────────────────────────────────────────────────────────────────────────────────┘ Get only buildings, amenities and highways from a PBF file. Tags will be split into separate columns because of applying the filter. >>> gpq_path = qosm.convert_pbf_to_gpq( ... monaco_pbf_path, ... tags_filter={"building": True, "amenity": True, "highway": True} ... ) >>> gpq_path.as_posix() 'files/monaco_6593ca69098459d039054bc5fe0a87c56681e29a5f59d38ce3485c03cb0e9374_noclip_exploded.geoparquet' Inspect the file with duckdb >>> import duckdb >>> duckdb.load_extension('spatial') >>> duckdb.read_parquet(str(gpq_path)).project( ... "* REPLACE (ST_GeomFromWKB(geometry) AS geometry)" ... 
).order("feature_id") # doctest: +SKIP ┌──────────────────┬──────────┬────────────┬─────────────┬───────────────────────────────┐ │ feature_id │ building │ amenity │ highway │ geometry │ │ varchar │ varchar │ varchar │ varchar │ geometry │ ├──────────────────┼──────────┼────────────┼─────────────┼───────────────────────────────┤ │ node/10025656390 │ NULL │ restaurant │ NULL │ POINT (7.4269287 43.7368818) │ │ node/10025843517 │ NULL │ restaurant │ NULL │ POINT (7.4219362 43.7367446) │ │ node/10025852089 │ NULL │ bar │ NULL │ POINT (7.4227543 43.7369926) │ │ node/10025852090 │ NULL │ restaurant │ NULL │ POINT (7.4225093 43.7369627) │ │ node/10068880332 │ NULL │ NULL │ platform │ POINT (7.4380849 43.7493273) │ │ node/10068880335 │ NULL │ bench │ NULL │ POINT (7.4186855 43.7321515) │ │ node/10127713363 │ NULL │ cafe │ NULL │ POINT (7.4266367 43.7420755) │ │ node/10601158089 │ NULL │ restaurant │ NULL │ POINT (7.4213086 43.7336187) │ │ node/10671507005 │ NULL │ bar │ NULL │ POINT (7.4296915 43.7423307) │ │ node/10674256605 │ NULL │ bar │ NULL │ POINT (7.4213558 43.7336317) │ │ · │ · │ · │ · │ · │ │ · │ · │ · │ · │ · │ │ · │ · │ · │ · │ · │ │ way/981971425 │ NULL │ NULL │ residential │ LINESTRING (7.4321217 43.74… │ │ way/982061461 │ NULL │ NULL │ secondary │ LINESTRING (7.4246341 43.74… │ │ way/982081599 │ NULL │ NULL │ tertiary │ LINESTRING (7.4225202 43.73… │ │ way/982081600 │ NULL │ NULL │ service │ LINESTRING (7.4225202 43.73… │ │ way/986029035 │ NULL │ NULL │ path │ LINESTRING (7.4189462 43.73… │ │ way/990669427 │ NULL │ shelter │ NULL │ POLYGON ((7.4146087 43.7338… │ │ way/990669428 │ NULL │ NULL │ secondary │ LINESTRING (7.4136598 43.73… │ │ way/990669429 │ NULL │ NULL │ secondary │ LINESTRING (7.4137621 43.73… │ │ way/990848785 │ yes │ NULL │ NULL │ POLYGON ((7.4142551 43.7339… │ │ way/993121275 │ yes │ NULL │ NULL │ POLYGON ((7.4321416 43.7481… │ ├──────────────────┴──────────┴────────────┴─────────────┴───────────────────────────────┤ │ 5772 rows (20 shown) 5 columns │ └────────────────────────────────────────────────────────────────────────────────────────┘ Get features for Malé - the capital city of Maldives Tags will be kept in a single column. >>> from shapely.geometry import box >>> gpq_path = qosm.convert_pbf_to_gpq( ... maldives_pbf_path, ... geometry_filter=box( ... minx=73.4975872, ... miny=4.1663240, ... maxx=73.5215528, ... maxy=4.1818121 ... ) ... ) >>> gpq_path.as_posix() 'files/maldives_nofilter_35532d32333a47a057265be0d7903ce27f6aa6ca3df31fe45f4ce67e4dbb3fb5_compact.geoparquet' Inspect the file with duckdb >>> import duckdb >>> duckdb.load_extension('spatial') >>> duckdb.read_parquet(str(gpq_path)).project( ... "* REPLACE (ST_GeomFromWKB(geometry) AS geometry)" ... 
).order("feature_id") # doctest: +SKIP ┌──────────────────┬──────────────────────┬──────────────────────────────────────────────┐ │ feature_id │ tags │ geometry │ │ varchar │ map(varchar, varch… │ geometry │ ├──────────────────┼──────────────────────┼──────────────────────────────────────────────┤ │ node/10010180778 │ {brand=Ooredoo, br… │ POINT (73.5179039 4.1752105) │ │ node/10062500171 │ {contact:facebook=… │ POINT (73.509583 4.1724485) │ │ node/10078084764 │ {addr:city=Male', … │ POINT (73.5047972 4.1726734) │ │ node/10078086040 │ {addr:city=Malé, a… │ POINT (73.5031714 4.1759622) │ │ node/10158825718 │ {addr:postcode=201… │ POINT (73.5083189 4.1730108) │ │ node/10289176711 │ {addr:street=Dhona… │ POINT (73.5133902 4.1725724) │ │ node/10294045310 │ {amenity=restauran… │ POINT (73.5091277 4.1735378) │ │ node/10294045311 │ {amenity=restauran… │ POINT (73.5055534 4.1759515) │ │ node/10294045411 │ {amenity=restauran… │ POINT (73.5037257 4.1717866) │ │ node/10294045412 │ {amenity=restauran… │ POINT (73.5024147 4.1761633) │ │ · │ · │ · │ │ · │ · │ · │ │ · │ · │ · │ │ way/91986244 │ {highway=residenti… │ LINESTRING (73.5069785 4.1704686, 73.50759… │ │ way/91986245 │ {highway=residenti… │ LINESTRING (73.5135834 4.1740562, 73.51383… │ │ way/91986249 │ {highway=residenti… │ LINESTRING (73.5153971 4.1735146, 73.51601… │ │ way/91986251 │ {highway=residenti… │ LINESTRING (73.5082522 4.1709887, 73.50823… │ │ way/91986254 │ {highway=residenti… │ LINESTRING (73.508114 4.1693477, 73.508154… │ │ way/91986255 │ {landuse=cemetery,… │ POLYGON ((73.507509 4.1731064, 73.5078884 … │ │ way/91986256 │ {highway=residenti… │ LINESTRING (73.5106692 4.1744828, 73.51082… │ │ way/935784864 │ {layer=-1, locatio… │ LINESTRING (73.4875382 4.1703263, 73.50074… │ │ way/935784867 │ {layer=-1, locatio… │ LINESTRING (73.446172 4.1856738, 73.460937… │ │ way/959150179 │ {amenity=place_of_… │ POLYGON ((73.5184052 4.1755282, 73.5184863… │ ├──────────────────┴──────────────────────┴──────────────────────────────────────────────┤ │ 2140 rows (20 shown) 3 columns │ └────────────────────────────────────────────────────────────────────────────────────────┘ """
""" Functions. This module contains helper functions to simplify the usage. """ def convert_pbf_to_gpq( pbf_path: Union[str, Path], tags_filter: Optional[Union[OsmTagsFilter, GroupedOsmTagsFilter]] = None, geometry_filter: Optional[BaseGeometry] = None, result_file_path: Optional[Union[str, Path]] = None, explode_tags: Optional[bool] = None, ignore_cache: bool = False, filter_osm_ids: Optional[list[str]] = None, working_directory: Union[str, Path] = "files", osm_way_polygon_features_config: Optional[Union[OsmWayPolygonConfig, dict[str, Any]]] = None, ) -> Path: """ Convert PBF file to GeoParquet file. Args: pbf_path (Union[str, Path]): Pbf file to be parsed to GeoParquet. tags_filter (Union[OsmTagsFilter, GroupedOsmTagsFilter], optional): A dictionary specifying which tags to download. The keys should be OSM tags (e.g. `building`, `amenity`). The values should either be `True` for retrieving all objects with the tag, string for retrieving a single tag-value pair or list of strings for retrieving all values specified in the list. `tags={'leisure': 'park}` would return parks from the area. `tags={'leisure': 'park, 'amenity': True, 'shop': ['bakery', 'bicycle']}` would return parks, all amenity types, bakeries and bicycle shops. If `None`, handler will allow all of the tags to be parsed. Defaults to `None`. geometry_filter (BaseGeometry, optional): Region which can be used to filter only intersecting OSM objects. Defaults to `None`. result_file_path (Union[str, Path], optional): Where to save the geoparquet file. If not provided, will be generated based on hashes from provided tags filter and geometry filter. Defaults to `None`. explode_tags (bool, optional): Whether to split tags into columns based on OSM tag keys. If `None`, will be set based on `tags_filter` parameter. If no tags filter is provided, then `explode_tags` will set to `False`, if there is tags filter it will set to `True`. Defaults to `None`. ignore_cache (bool, optional): Whether to ignore precalculated geoparquet files or not. Defaults to False. filter_osm_ids: (list[str], optional): List of OSM features ids to read from the file. Have to be in the form of 'node/<id>', 'way/<id>' or 'relation/<id>'. Defaults to an empty list. working_directory (Union[str, Path], optional): Directory where to save the parsed `*.parquet` files. Defaults to "files". osm_way_polygon_features_config (Union[OsmWayPolygonConfig, dict[str, Any]], optional): Config used to determine which closed way features are polygons. Modifications to this config left are left for experienced OSM users. Defaults to predefined "osm_way_polygon_features.json". Returns: Path: Path to the generated GeoParquet file. Examples: Get OSM data from a PBF file. Tags will be kept in a single column. >>> import quackosm as qosm >>> gpq_path = qosm.convert_pbf_to_gpq(monaco_pbf_path) >>> gpq_path.as_posix() 'files/monaco_nofilter_noclip_compact.geoparquet' Inspect the file with duckdb >>> import duckdb >>> duckdb.load_extension('spatial') >>> duckdb.read_parquet(str(gpq_path)).project( ... "* REPLACE (ST_GeomFromWKB(geometry) AS geometry)" ... 
).order("feature_id") # doctest: +SKIP ┌──────────────────┬──────────────────────┬──────────────────────────────────────────────┐ │ feature_id │ tags │ geometry │ │ varchar │ map(varchar, varch… │ geometry │ ├──────────────────┼──────────────────────┼──────────────────────────────────────────────┤ │ node/10005045289 │ {shop=bakery} │ POINT (7.4224498 43.7310532) │ │ node/10020887517 │ {leisure=swimming_… │ POINT (7.4131561 43.7338391) │ │ node/10021298117 │ {leisure=swimming_… │ POINT (7.4277743 43.7427669) │ │ node/10021298717 │ {leisure=swimming_… │ POINT (7.4263029 43.7409734) │ │ node/10025656383 │ {ferry=yes, name=Q… │ POINT (7.4254971 43.7369002) │ │ node/10025656390 │ {amenity=restauran… │ POINT (7.4269287 43.7368818) │ │ node/10025656391 │ {name=Capitainerie… │ POINT (7.4272127 43.7359593) │ │ node/10025656392 │ {name=Direction de… │ POINT (7.4270392 43.7365262) │ │ node/10025656393 │ {name=IQOS, openin… │ POINT (7.4275175 43.7373195) │ │ node/10025656394 │ {artist_name=Anna … │ POINT (7.4293446 43.737448) │ │ · │ · │ · │ │ · │ · │ · │ │ · │ · │ · │ │ way/986864693 │ {natural=bare_rock} │ POLYGON ((7.4340482 43.745598, 7.4340263 4… │ │ way/986864694 │ {barrier=wall} │ LINESTRING (7.4327547 43.7445382, 7.432808… │ │ way/986864695 │ {natural=bare_rock} │ POLYGON ((7.4332994 43.7449315, 7.4332912 … │ │ way/986864696 │ {barrier=wall} │ LINESTRING (7.4356006 43.7464325, 7.435574… │ │ way/986864697 │ {natural=bare_rock} │ POLYGON ((7.4362767 43.74697, 7.4362983 43… │ │ way/990669427 │ {amenity=shelter, … │ POLYGON ((7.4146087 43.733883, 7.4146192 4… │ │ way/990669428 │ {highway=secondary… │ LINESTRING (7.4136598 43.7334433, 7.413640… │ │ way/990669429 │ {highway=secondary… │ LINESTRING (7.4137621 43.7334251, 7.413746… │ │ way/990848785 │ {addr:city=Monaco,… │ POLYGON ((7.4142551 43.7339622, 7.4143113 … │ │ way/993121275 │ {building=yes, nam… │ POLYGON ((7.4321416 43.7481309, 7.4321638 … │ ├──────────────────┴──────────────────────┴──────────────────────────────────────────────┤ │ 7906 rows (20 shown) 3 columns │ └────────────────────────────────────────────────────────────────────────────────────────┘ Get only buildings, amenities and highways from a PBF file. Tags will be split into separate columns because of applying the filter. >>> gpq_path = qosm.convert_pbf_to_gpq( ... monaco_pbf_path, ... tags_filter={"building": True, "amenity": True, "highway": True} ... ) >>> gpq_path.as_posix() 'files/monaco_6593ca69098459d039054bc5fe0a87c56681e29a5f59d38ce3485c03cb0e9374_noclip_exploded.geoparquet' Inspect the file with duckdb >>> import duckdb >>> duckdb.load_extension('spatial') >>> duckdb.read_parquet(str(gpq_path)).project( ... "* REPLACE (ST_GeomFromWKB(geometry) AS geometry)" ... 
).order("feature_id") # doctest: +SKIP ┌──────────────────┬──────────┬────────────┬─────────────┬───────────────────────────────┐ │ feature_id │ building │ amenity │ highway │ geometry │ │ varchar │ varchar │ varchar │ varchar │ geometry │ ├──────────────────┼──────────┼────────────┼─────────────┼───────────────────────────────┤ │ node/10025656390 │ NULL │ restaurant │ NULL │ POINT (7.4269287 43.7368818) │ │ node/10025843517 │ NULL │ restaurant │ NULL │ POINT (7.4219362 43.7367446) │ │ node/10025852089 │ NULL │ bar │ NULL │ POINT (7.4227543 43.7369926) │ │ node/10025852090 │ NULL │ restaurant │ NULL │ POINT (7.4225093 43.7369627) │ │ node/10068880332 │ NULL │ NULL │ platform │ POINT (7.4380849 43.7493273) │ │ node/10068880335 │ NULL │ bench │ NULL │ POINT (7.4186855 43.7321515) │ │ node/10127713363 │ NULL │ cafe │ NULL │ POINT (7.4266367 43.7420755) │ │ node/10601158089 │ NULL │ restaurant │ NULL │ POINT (7.4213086 43.7336187) │ │ node/10671507005 │ NULL │ bar │ NULL │ POINT (7.4296915 43.7423307) │ │ node/10674256605 │ NULL │ bar │ NULL │ POINT (7.4213558 43.7336317) │ │ · │ · │ · │ · │ · │ │ · │ · │ · │ · │ · │ │ · │ · │ · │ · │ · │ │ way/981971425 │ NULL │ NULL │ residential │ LINESTRING (7.4321217 43.74… │ │ way/982061461 │ NULL │ NULL │ secondary │ LINESTRING (7.4246341 43.74… │ │ way/982081599 │ NULL │ NULL │ tertiary │ LINESTRING (7.4225202 43.73… │ │ way/982081600 │ NULL │ NULL │ service │ LINESTRING (7.4225202 43.73… │ │ way/986029035 │ NULL │ NULL │ path │ LINESTRING (7.4189462 43.73… │ │ way/990669427 │ NULL │ shelter │ NULL │ POLYGON ((7.4146087 43.7338… │ │ way/990669428 │ NULL │ NULL │ secondary │ LINESTRING (7.4136598 43.73… │ │ way/990669429 │ NULL │ NULL │ secondary │ LINESTRING (7.4137621 43.73… │ │ way/990848785 │ yes │ NULL │ NULL │ POLYGON ((7.4142551 43.7339… │ │ way/993121275 │ yes │ NULL │ NULL │ POLYGON ((7.4321416 43.7481… │ ├──────────────────┴──────────┴────────────┴─────────────┴───────────────────────────────┤ │ 5772 rows (20 shown) 5 columns │ └────────────────────────────────────────────────────────────────────────────────────────┘ Get features for Malé - the capital city of Maldives Tags will be kept in a single column. >>> from shapely.geometry import box >>> gpq_path = qosm.convert_pbf_to_gpq( ... maldives_pbf_path, ... geometry_filter=box( ... minx=73.4975872, ... miny=4.1663240, ... maxx=73.5215528, ... maxy=4.1818121 ... ) ... ) >>> gpq_path.as_posix() 'files/maldives_nofilter_35532d32333a47a057265be0d7903ce27f6aa6ca3df31fe45f4ce67e4dbb3fb5_compact.geoparquet' Inspect the file with duckdb >>> import duckdb >>> duckdb.load_extension('spatial') >>> duckdb.read_parquet(str(gpq_path)).project( ... "* REPLACE (ST_GeomFromWKB(geometry) AS geometry)" ... 
).order("feature_id") # doctest: +SKIP ┌──────────────────┬──────────────────────┬──────────────────────────────────────────────┐ │ feature_id │ tags │ geometry │ │ varchar │ map(varchar, varch… │ geometry │ ├──────────────────┼──────────────────────┼──────────────────────────────────────────────┤ │ node/10010180778 │ {brand=Ooredoo, br… │ POINT (73.5179039 4.1752105) │ │ node/10062500171 │ {contact:facebook=… │ POINT (73.509583 4.1724485) │ │ node/10078084764 │ {addr:city=Male', … │ POINT (73.5047972 4.1726734) │ │ node/10078086040 │ {addr:city=Malé, a… │ POINT (73.5031714 4.1759622) │ │ node/10158825718 │ {addr:postcode=201… │ POINT (73.5083189 4.1730108) │ │ node/10289176711 │ {addr:street=Dhona… │ POINT (73.5133902 4.1725724) │ │ node/10294045310 │ {amenity=restauran… │ POINT (73.5091277 4.1735378) │ │ node/10294045311 │ {amenity=restauran… │ POINT (73.5055534 4.1759515) │ │ node/10294045411 │ {amenity=restauran… │ POINT (73.5037257 4.1717866) │ │ node/10294045412 │ {amenity=restauran… │ POINT (73.5024147 4.1761633) │ │ · │ · │ · │ │ · │ · │ · │ │ · │ · │ · │ │ way/91986244 │ {highway=residenti… │ LINESTRING (73.5069785 4.1704686, 73.50759… │ │ way/91986245 │ {highway=residenti… │ LINESTRING (73.5135834 4.1740562, 73.51383… │ │ way/91986249 │ {highway=residenti… │ LINESTRING (73.5153971 4.1735146, 73.51601… │ │ way/91986251 │ {highway=residenti… │ LINESTRING (73.5082522 4.1709887, 73.50823… │ │ way/91986254 │ {highway=residenti… │ LINESTRING (73.508114 4.1693477, 73.508154… │ │ way/91986255 │ {landuse=cemetery,… │ POLYGON ((73.507509 4.1731064, 73.5078884 … │ │ way/91986256 │ {highway=residenti… │ LINESTRING (73.5106692 4.1744828, 73.51082… │ │ way/935784864 │ {layer=-1, locatio… │ LINESTRING (73.4875382 4.1703263, 73.50074… │ │ way/935784867 │ {layer=-1, locatio… │ LINESTRING (73.446172 4.1856738, 73.460937… │ │ way/959150179 │ {amenity=place_of_… │ POLYGON ((73.5184052 4.1755282, 73.5184863… │ ├──────────────────┴──────────────────────┴──────────────────────────────────────────────┤ │ 2140 rows (20 shown) 3 columns │ └────────────────────────────────────────────────────────────────────────────────────────┘ """
return PbfFileReader(
2
2023-12-28 11:26:41+00:00
24k
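For reference, a short usage sketch of the `convert_pbf_to_gpq` API documented in this record. "monaco.osm.pbf" is a placeholder path (the call only executes against a real PBF file), the bounding-box coordinates are illustrative, and the `tags_filter` values follow the docstring's semantics: `True` = any value, a string = one value, a list = a set of values.

from shapely.geometry import box
import quackosm as qosm

gpq_path = qosm.convert_pbf_to_gpq(
    "monaco.osm.pbf",                         # placeholder input file
    tags_filter={"building": True,            # any building=* object
                 "shop": ["bakery", "bicycle"]},
    geometry_filter=box(minx=7.40, miny=43.72, maxx=7.44, maxy=43.75),
    explode_tags=True,                        # one column per tag key
)
print(gpq_path.as_posix())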
KyanChen/TTP
mmdet/models/roi_heads/bbox_heads/multi_instance_bbox_head.py
[ { "identifier": "BBoxHead", "path": "mmdet/models/roi_heads/bbox_heads/bbox_head.py", "snippet": "class BBoxHead(BaseModule):\n \"\"\"Simplest RoI head, with only two fc layers for classification and\n regression respectively.\"\"\"\n\n def __init__(self,\n with_avg_pool: bool =...
from typing import List, Optional, Tuple, Union from mmcv.cnn import ConvModule from mmengine.config import ConfigDict from mmengine.structures import InstanceData from torch import Tensor, nn from mmdet.models.roi_heads.bbox_heads.bbox_head import BBoxHead from mmdet.models.task_modules.samplers import SamplingResult from mmdet.models.utils import empty_instances from mmdet.registry import MODELS from mmdet.structures.bbox import bbox_overlaps import numpy as np import torch import torch.nn.functional as F
15,729
bbox_pred: Tensor, img_meta: dict, rescale: bool = False, rcnn_test_cfg: Optional[ConfigDict] = None) -> InstanceData: """Transform a single image's features extracted from the head into bbox results. Args: roi (Tensor): Boxes to be transformed. Has shape (num_boxes, 5); the last dimension is arranged as (batch_index, x1, y1, x2, y2). cls_score (Tensor): Box scores, has shape (num_boxes, num_classes + 1). bbox_pred (Tensor): Box energies / deltas, has shape (num_boxes, num_classes * 4). img_meta (dict): image information. rescale (bool): If True, return boxes in original image space. Defaults to False. rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head. Defaults to None. Returns: :obj:`InstanceData`: Detection results of each image. Each item usually contains the following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arranged as (x1, y1, x2, y2). """ cls_score = cls_score.reshape(-1, self.num_classes + 1) bbox_pred = bbox_pred.reshape(-1, 4) roi = roi.repeat_interleave(self.num_instance, dim=0) results = InstanceData() if roi.shape[0] == 0: return empty_instances([img_meta], roi.device, task_type='bbox', instance_results=[results])[0] scores = cls_score.softmax(dim=-1) if cls_score is not None else None img_shape = img_meta['img_shape'] bboxes = self.bbox_coder.decode( roi[..., 1:], bbox_pred, max_shape=img_shape) if rescale and bboxes.size(0) > 0: assert img_meta.get('scale_factor') is not None scale_factor = bboxes.new_tensor(img_meta['scale_factor']).repeat( (1, 2)) bboxes = (bboxes.view(bboxes.size(0), -1, 4) / scale_factor).view( bboxes.size()[0], -1) if rcnn_test_cfg is None: # This means that it is aug test. # It needs to return the raw results without nms. results.bboxes = bboxes results.scores = scores else: roi_idx = np.tile( np.arange(bboxes.shape[0] / self.num_instance)[:, None], (1, self.num_instance)).reshape(-1, 1)[:, 0] roi_idx = torch.from_numpy(roi_idx).to(bboxes.device).reshape( -1, 1) bboxes = torch.cat([bboxes, roi_idx], dim=1) det_bboxes, det_scores = self.set_nms( bboxes, scores[:, 1], rcnn_test_cfg.score_thr, rcnn_test_cfg.nms['iou_threshold'], rcnn_test_cfg.max_per_img) results.bboxes = det_bboxes[:, :-1] results.scores = det_scores results.labels = torch.zeros_like(det_scores) return results @staticmethod def set_nms(bboxes: Tensor, scores: Tensor, score_thr: float, iou_threshold: float, max_num: int = -1) -> Tuple[Tensor, Tensor]: """NMS for multi-instance prediction. Please refer to https://github.com/Purkialo/CrowdDet for more details. Args: bboxes (Tensor): predicted bboxes. scores (Tensor): The score of each predicted bbox. score_thr (float): bbox threshold, bboxes with scores lower than it will not be considered. iou_threshold (float): IoU threshold to be considered as conflicted. max_num (int, optional): if there are more than max_num bboxes after NMS, only top max_num will be kept. Defaults to -1. Returns: Tuple[Tensor, Tensor]: (bboxes, scores). 
""" bboxes = bboxes[scores > score_thr] scores = scores[scores > score_thr] ordered_scores, order = scores.sort(descending=True) ordered_bboxes = bboxes[order] roi_idx = ordered_bboxes[:, -1] keep = torch.ones(len(ordered_bboxes)) == 1 ruler = torch.arange(len(ordered_bboxes)) keep = keep.to(bboxes.device) ruler = ruler.to(bboxes.device) while ruler.shape[0] > 0: basement = ruler[0] ruler = ruler[1:] idx = roi_idx[basement] # calculate the body overlap basement_bbox = ordered_bboxes[:, :4][basement].reshape(-1, 4) ruler_bbox = ordered_bboxes[:, :4][ruler].reshape(-1, 4)
# Copyright (c) OpenMMLab. All rights reserved. @MODELS.register_module() class MultiInstanceBBoxHead(BBoxHead): r"""Bbox head used in CrowdDet. .. code-block:: none /-> cls convs_1 -> cls fcs_1 -> cls_1 |-- | \-> reg convs_1 -> reg fcs_1 -> reg_1 | | /-> cls convs_2 -> cls fcs_2 -> cls_2 shared convs -> shared fcs |-- | \-> reg convs_2 -> reg fcs_2 -> reg_2 | | ... | | /-> cls convs_k -> cls fcs_k -> cls_k |-- \-> reg convs_k -> reg fcs_k -> reg_k Args: num_instance (int): The number of branches after shared fcs. Defaults to 2. with_refine (bool): Whether to use refine module. Defaults to False. num_shared_convs (int): The number of shared convs. Defaults to 0. num_shared_fcs (int): The number of shared fcs. Defaults to 2. num_cls_convs (int): The number of cls convs. Defaults to 0. num_cls_fcs (int): The number of cls fcs. Defaults to 0. num_reg_convs (int): The number of reg convs. Defaults to 0. num_reg_fcs (int): The number of reg fcs. Defaults to 0. conv_out_channels (int): The number of conv out channels. Defaults to 256. fc_out_channels (int): The number of fc out channels. Defaults to 1024. init_cfg (dict or list[dict], optional): Initialization config dict. Defaults to None. """ # noqa: W605 def __init__(self, num_instance: int = 2, with_refine: bool = False, num_shared_convs: int = 0, num_shared_fcs: int = 2, num_cls_convs: int = 0, num_cls_fcs: int = 0, num_reg_convs: int = 0, num_reg_fcs: int = 0, conv_out_channels: int = 256, fc_out_channels: int = 1024, init_cfg: Optional[Union[dict, ConfigDict]] = None, *args, **kwargs) -> None: super().__init__(*args, init_cfg=init_cfg, **kwargs) assert (num_shared_convs + num_shared_fcs + num_cls_convs + num_cls_fcs + num_reg_convs + num_reg_fcs > 0) assert num_instance == 2, 'Currently only 2 instances are supported' if num_cls_convs > 0 or num_reg_convs > 0: assert num_shared_fcs == 0 if not self.with_cls: assert num_cls_convs == 0 and num_cls_fcs == 0 if not self.with_reg: assert num_reg_convs == 0 and num_reg_fcs == 0 self.num_instance = num_instance self.num_shared_convs = num_shared_convs self.num_shared_fcs = num_shared_fcs self.num_cls_convs = num_cls_convs self.num_cls_fcs = num_cls_fcs self.num_reg_convs = num_reg_convs self.num_reg_fcs = num_reg_fcs self.conv_out_channels = conv_out_channels self.fc_out_channels = fc_out_channels self.with_refine = with_refine # add shared convs and fcs self.shared_convs, self.shared_fcs, last_layer_dim = \ self._add_conv_fc_branch( self.num_shared_convs, self.num_shared_fcs, self.in_channels, True) self.shared_out_channels = last_layer_dim self.relu = nn.ReLU(inplace=True) if self.with_refine: refine_model_cfg = { 'type': 'Linear', 'in_features': self.shared_out_channels + 20, 'out_features': self.shared_out_channels } self.shared_fcs_ref = MODELS.build(refine_model_cfg) self.fc_cls_ref = nn.ModuleList() self.fc_reg_ref = nn.ModuleList() self.cls_convs = nn.ModuleList() self.cls_fcs = nn.ModuleList() self.reg_convs = nn.ModuleList() self.reg_fcs = nn.ModuleList() self.cls_last_dim = list() self.reg_last_dim = list() self.fc_cls = nn.ModuleList() self.fc_reg = nn.ModuleList() for k in range(self.num_instance): # add cls specific branch cls_convs, cls_fcs, cls_last_dim = self._add_conv_fc_branch( self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels) self.cls_convs.append(cls_convs) self.cls_fcs.append(cls_fcs) self.cls_last_dim.append(cls_last_dim) # add reg specific branch reg_convs, reg_fcs, reg_last_dim = self._add_conv_fc_branch( self.num_reg_convs, self.num_reg_fcs, 
self.shared_out_channels) self.reg_convs.append(reg_convs) self.reg_fcs.append(reg_fcs) self.reg_last_dim.append(reg_last_dim) if self.num_shared_fcs == 0 and not self.with_avg_pool: if self.num_cls_fcs == 0: self.cls_last_dim *= self.roi_feat_area if self.num_reg_fcs == 0: self.reg_last_dim *= self.roi_feat_area if self.with_cls: if self.custom_cls_channels: cls_channels = self.loss_cls.get_cls_channels( self.num_classes) else: cls_channels = self.num_classes + 1 cls_predictor_cfg_ = self.cls_predictor_cfg.copy() # deepcopy cls_predictor_cfg_.update( in_features=self.cls_last_dim[k], out_features=cls_channels) self.fc_cls.append(MODELS.build(cls_predictor_cfg_)) if self.with_refine: self.fc_cls_ref.append(MODELS.build(cls_predictor_cfg_)) if self.with_reg: out_dim_reg = (4 if self.reg_class_agnostic else 4 * self.num_classes) reg_predictor_cfg_ = self.reg_predictor_cfg.copy() reg_predictor_cfg_.update( in_features=self.reg_last_dim[k], out_features=out_dim_reg) self.fc_reg.append(MODELS.build(reg_predictor_cfg_)) if self.with_refine: self.fc_reg_ref.append(MODELS.build(reg_predictor_cfg_)) if init_cfg is None: # when init_cfg is None, # It has been set to # [[dict(type='Normal', std=0.01, override=dict(name='fc_cls'))], # [dict(type='Normal', std=0.001, override=dict(name='fc_reg'))] # after `super(ConvFCBBoxHead, self).__init__()` # we only need to append additional configuration # for `shared_fcs`, `cls_fcs` and `reg_fcs` self.init_cfg += [ dict( type='Xavier', distribution='uniform', override=[ dict(name='shared_fcs'), dict(name='cls_fcs'), dict(name='reg_fcs') ]) ] def _add_conv_fc_branch(self, num_branch_convs: int, num_branch_fcs: int, in_channels: int, is_shared: bool = False) -> tuple: """Add shared or separable branch. convs -> avg pool (optional) -> fcs """ last_layer_dim = in_channels # add branch specific conv layers branch_convs = nn.ModuleList() if num_branch_convs > 0: for i in range(num_branch_convs): conv_in_channels = ( last_layer_dim if i == 0 else self.conv_out_channels) branch_convs.append( ConvModule( conv_in_channels, self.conv_out_channels, 3, padding=1)) last_layer_dim = self.conv_out_channels # add branch specific fc layers branch_fcs = nn.ModuleList() if num_branch_fcs > 0: # for shared branch, only consider self.with_avg_pool # for separated branches, also consider self.num_shared_fcs if (is_shared or self.num_shared_fcs == 0) and not self.with_avg_pool: last_layer_dim *= self.roi_feat_area for i in range(num_branch_fcs): fc_in_channels = ( last_layer_dim if i == 0 else self.fc_out_channels) branch_fcs.append( nn.Linear(fc_in_channels, self.fc_out_channels)) last_layer_dim = self.fc_out_channels return branch_convs, branch_fcs, last_layer_dim def forward(self, x: Tuple[Tensor]) -> tuple: """Forward features from the upstream network. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: A tuple of classification scores and bbox prediction. - cls_score (Tensor): Classification scores for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * num_classes. - bbox_pred (Tensor): Box energies / deltas for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * 4. - cls_score_ref (Tensor): The cls_score after refine model. - bbox_pred_ref (Tensor): The bbox_pred after refine model. 
""" # shared part if self.num_shared_convs > 0: for conv in self.shared_convs: x = conv(x) if self.num_shared_fcs > 0: if self.with_avg_pool: x = self.avg_pool(x) x = x.flatten(1) for fc in self.shared_fcs: x = self.relu(fc(x)) x_cls = x x_reg = x # separate branches cls_score = list() bbox_pred = list() for k in range(self.num_instance): for conv in self.cls_convs[k]: x_cls = conv(x_cls) if x_cls.dim() > 2: if self.with_avg_pool: x_cls = self.avg_pool(x_cls) x_cls = x_cls.flatten(1) for fc in self.cls_fcs[k]: x_cls = self.relu(fc(x_cls)) for conv in self.reg_convs[k]: x_reg = conv(x_reg) if x_reg.dim() > 2: if self.with_avg_pool: x_reg = self.avg_pool(x_reg) x_reg = x_reg.flatten(1) for fc in self.reg_fcs[k]: x_reg = self.relu(fc(x_reg)) cls_score.append(self.fc_cls[k](x_cls) if self.with_cls else None) bbox_pred.append(self.fc_reg[k](x_reg) if self.with_reg else None) if self.with_refine: x_ref = x cls_score_ref = list() bbox_pred_ref = list() for k in range(self.num_instance): feat_ref = cls_score[k].softmax(dim=-1) feat_ref = torch.cat((bbox_pred[k], feat_ref[:, 1][:, None]), dim=1).repeat(1, 4) feat_ref = torch.cat((x_ref, feat_ref), dim=1) feat_ref = F.relu_(self.shared_fcs_ref(feat_ref)) cls_score_ref.append(self.fc_cls_ref[k](feat_ref)) bbox_pred_ref.append(self.fc_reg_ref[k](feat_ref)) cls_score = torch.cat(cls_score, dim=1) bbox_pred = torch.cat(bbox_pred, dim=1) cls_score_ref = torch.cat(cls_score_ref, dim=1) bbox_pred_ref = torch.cat(bbox_pred_ref, dim=1) return cls_score, bbox_pred, cls_score_ref, bbox_pred_ref cls_score = torch.cat(cls_score, dim=1) bbox_pred = torch.cat(bbox_pred, dim=1) return cls_score, bbox_pred def get_targets(self, sampling_results: List[SamplingResult], rcnn_train_cfg: ConfigDict, concat: bool = True) -> tuple: """Calculate the ground truth for all samples in a batch according to the sampling_results. Almost the same as the implementation in bbox_head, we passed additional parameters pos_inds_list and neg_inds_list to `_get_targets_single` function. Args: sampling_results (List[obj:SamplingResult]): Assign results of all images in a batch after sampling. rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN. concat (bool): Whether to concatenate the results of all the images in a single batch. Returns: Tuple[Tensor]: Ground truth for proposals in a single image. Containing the following list of Tensors: - labels (list[Tensor],Tensor): Gt_labels for all proposals in a batch, each tensor in list has shape (num_proposals,) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals,). - label_weights (list[Tensor]): Labels_weights for all proposals in a batch, each tensor in list has shape (num_proposals,) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals,). - bbox_targets (list[Tensor],Tensor): Regression target for all proposals in a batch, each tensor in list has shape (num_proposals, 4) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. - bbox_weights (list[tensor],Tensor): Regression weights for all proposals in a batch, each tensor in list has shape (num_proposals, 4) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals, 4). 
""" labels = [] bbox_targets = [] bbox_weights = [] label_weights = [] for i in range(len(sampling_results)): sample_bboxes = torch.cat([ sampling_results[i].pos_gt_bboxes, sampling_results[i].neg_gt_bboxes ]) sample_priors = sampling_results[i].priors sample_priors = sample_priors.repeat(1, self.num_instance).reshape( -1, 4) sample_bboxes = sample_bboxes.reshape(-1, 4) if not self.reg_decoded_bbox: _bbox_targets = self.bbox_coder.encode(sample_priors, sample_bboxes) else: _bbox_targets = sample_priors _bbox_targets = _bbox_targets.reshape(-1, self.num_instance * 4) _bbox_weights = torch.ones(_bbox_targets.shape) _labels = torch.cat([ sampling_results[i].pos_gt_labels, sampling_results[i].neg_gt_labels ]) _labels_weights = torch.ones(_labels.shape) bbox_targets.append(_bbox_targets) bbox_weights.append(_bbox_weights) labels.append(_labels) label_weights.append(_labels_weights) if concat: labels = torch.cat(labels, 0) label_weights = torch.cat(label_weights, 0) bbox_targets = torch.cat(bbox_targets, 0) bbox_weights = torch.cat(bbox_weights, 0) return labels, label_weights, bbox_targets, bbox_weights def loss(self, cls_score: Tensor, bbox_pred: Tensor, rois: Tensor, labels: Tensor, label_weights: Tensor, bbox_targets: Tensor, bbox_weights: Tensor, **kwargs) -> dict: """Calculate the loss based on the network predictions and targets. Args: cls_score (Tensor): Classification prediction results of all class, has shape (batch_size * num_proposals_single_image, (num_classes + 1) * k), k represents the number of prediction boxes generated by each proposal box. bbox_pred (Tensor): Regression prediction results, has shape (batch_size * num_proposals_single_image, 4 * k), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. rois (Tensor): RoIs with the shape (batch_size * num_proposals_single_image, 5) where the first column indicates batch id of each RoI. labels (Tensor): Gt_labels for all proposals in a batch, has shape (batch_size * num_proposals_single_image, k). label_weights (Tensor): Labels_weights for all proposals in a batch, has shape (batch_size * num_proposals_single_image, k). bbox_targets (Tensor): Regression target for all proposals in a batch, has shape (batch_size * num_proposals_single_image, 4 * k), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. bbox_weights (Tensor): Regression weights for all proposals in a batch, has shape (batch_size * num_proposals_single_image, 4 * k). Returns: dict: A dictionary of loss. """ losses = dict() if bbox_pred.numel(): loss_0 = self.emd_loss(bbox_pred[:, 0:4], cls_score[:, 0:2], bbox_pred[:, 4:8], cls_score[:, 2:4], bbox_targets, labels) loss_1 = self.emd_loss(bbox_pred[:, 4:8], cls_score[:, 2:4], bbox_pred[:, 0:4], cls_score[:, 0:2], bbox_targets, labels) loss = torch.cat([loss_0, loss_1], dim=1) _, min_indices = loss.min(dim=1) loss_emd = loss[torch.arange(loss.shape[0]), min_indices] loss_emd = loss_emd.mean() else: loss_emd = bbox_pred.sum() losses['loss_rcnn_emd'] = loss_emd return losses def emd_loss(self, bbox_pred_0: Tensor, cls_score_0: Tensor, bbox_pred_1: Tensor, cls_score_1: Tensor, targets: Tensor, labels: Tensor) -> Tensor: """Calculate the emd loss. Note: This implementation is modified from https://github.com/Purkialo/ CrowdDet/blob/master/lib/det_oprs/loss_opr.py Args: bbox_pred_0 (Tensor): Part of regression prediction results, has shape (batch_size * num_proposals_single_image, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. 
cls_score_0 (Tensor): Part of classification prediction results, has shape (batch_size * num_proposals_single_image, (num_classes + 1)), where 1 represents the background. bbox_pred_1 (Tensor): The other part of regression prediction results, has shape (batch_size*num_proposals_single_image, 4). cls_score_1 (Tensor): The other part of classification prediction results, has shape (batch_size * num_proposals_single_image, (num_classes + 1)). targets (Tensor): Regression target for all proposals in a batch, has shape (batch_size * num_proposals_single_image, 4 * k), the last dimension 4 represents [tl_x, tl_y, br_x, br_y], k represents the number of prediction boxes generated by each proposal box. labels (Tensor): Gt_labels for all proposals in a batch, has shape (batch_size * num_proposals_single_image, k). Returns: torch.Tensor: The calculated loss. """ bbox_pred = torch.cat([bbox_pred_0, bbox_pred_1], dim=1).reshape(-1, bbox_pred_0.shape[-1]) cls_score = torch.cat([cls_score_0, cls_score_1], dim=1).reshape(-1, cls_score_0.shape[-1]) targets = targets.reshape(-1, 4) labels = labels.long().flatten() # masks valid_masks = labels >= 0 fg_masks = labels > 0 # multiple class bbox_pred = bbox_pred.reshape(-1, self.num_classes, 4) fg_gt_classes = labels[fg_masks] bbox_pred = bbox_pred[fg_masks, fg_gt_classes - 1, :] # loss for regression loss_bbox = self.loss_bbox(bbox_pred, targets[fg_masks]) loss_bbox = loss_bbox.sum(dim=1) # loss for classification labels = labels * valid_masks loss_cls = self.loss_cls(cls_score, labels) loss_cls[fg_masks] = loss_cls[fg_masks] + loss_bbox loss = loss_cls.reshape(-1, 2).sum(dim=1) return loss.reshape(-1, 1) def _predict_by_feat_single( self, roi: Tensor, cls_score: Tensor, bbox_pred: Tensor, img_meta: dict, rescale: bool = False, rcnn_test_cfg: Optional[ConfigDict] = None) -> InstanceData: """Transform a single image's features extracted from the head into bbox results. Args: roi (Tensor): Boxes to be transformed. Has shape (num_boxes, 5); the last dimension is arranged as (batch_index, x1, y1, x2, y2). cls_score (Tensor): Box scores, has shape (num_boxes, num_classes + 1). bbox_pred (Tensor): Box energies / deltas, has shape (num_boxes, num_classes * 4). img_meta (dict): image information. rescale (bool): If True, return boxes in original image space. Defaults to False. rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head. Defaults to None. Returns: :obj:`InstanceData`: Detection results of each image. Each item usually contains the following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arranged as (x1, y1, x2, y2). """ cls_score = cls_score.reshape(-1, self.num_classes + 1) bbox_pred = bbox_pred.reshape(-1, 4) roi = roi.repeat_interleave(self.num_instance, dim=0) results = InstanceData() if roi.shape[0] == 0: return empty_instances([img_meta], roi.device, task_type='bbox', instance_results=[results])[0] scores = cls_score.softmax(dim=-1) if cls_score is not None else None img_shape = img_meta['img_shape'] bboxes = self.bbox_coder.decode( roi[..., 1:], bbox_pred, max_shape=img_shape) if rescale and bboxes.size(0) > 0: assert img_meta.get('scale_factor') is not None scale_factor = bboxes.new_tensor(img_meta['scale_factor']).repeat( (1, 2)) bboxes = (bboxes.view(bboxes.size(0), -1, 4) / scale_factor).view( bboxes.size()[0], -1) if rcnn_test_cfg is None: # This means that it is aug test. 
# It needs to return the raw results without nms. results.bboxes = bboxes results.scores = scores else: roi_idx = np.tile( np.arange(bboxes.shape[0] / self.num_instance)[:, None], (1, self.num_instance)).reshape(-1, 1)[:, 0] roi_idx = torch.from_numpy(roi_idx).to(bboxes.device).reshape( -1, 1) bboxes = torch.cat([bboxes, roi_idx], dim=1) det_bboxes, det_scores = self.set_nms( bboxes, scores[:, 1], rcnn_test_cfg.score_thr, rcnn_test_cfg.nms['iou_threshold'], rcnn_test_cfg.max_per_img) results.bboxes = det_bboxes[:, :-1] results.scores = det_scores results.labels = torch.zeros_like(det_scores) return results @staticmethod def set_nms(bboxes: Tensor, scores: Tensor, score_thr: float, iou_threshold: float, max_num: int = -1) -> Tuple[Tensor, Tensor]: """NMS for multi-instance prediction. Please refer to https://github.com/Purkialo/CrowdDet for more details. Args: bboxes (Tensor): predicted bboxes. scores (Tensor): The score of each predicted bbox. score_thr (float): bbox threshold, bboxes with scores lower than it will not be considered. iou_threshold (float): IoU threshold to be considered as conflicted. max_num (int, optional): if there are more than max_num bboxes after NMS, only top max_num will be kept. Defaults to -1. Returns: Tuple[Tensor, Tensor]: (bboxes, scores). """ bboxes = bboxes[scores > score_thr] scores = scores[scores > score_thr] ordered_scores, order = scores.sort(descending=True) ordered_bboxes = bboxes[order] roi_idx = ordered_bboxes[:, -1] keep = torch.ones(len(ordered_bboxes)) == 1 ruler = torch.arange(len(ordered_bboxes)) keep = keep.to(bboxes.device) ruler = ruler.to(bboxes.device) while ruler.shape[0] > 0: basement = ruler[0] ruler = ruler[1:] idx = roi_idx[basement] # calculate the body overlap basement_bbox = ordered_bboxes[:, :4][basement].reshape(-1, 4) ruler_bbox = ordered_bboxes[:, :4][ruler].reshape(-1, 4)
overlap = bbox_overlaps(basement_bbox, ruler_bbox)
4
2023-12-23 08:36:47+00:00
24k
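The `set_nms` method in this record implements CrowdDet-style set NMS: boxes decoded from the same proposal (same `roi_idx`) are never allowed to suppress one another, so both members of a crowded pair survive. Below is a self-contained sketch of that idea under simplified assumptions — `pairwise_iou` stands in for mmdet's `bbox_overlaps`, and the score threshold and `max_num` cap of the original are omitted.

import torch

def pairwise_iou(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    # IoU between boxes a [N, 4] and b [M, 4] in (x1, y1, x2, y2) format.
    tl = torch.max(a[:, None, :2], b[None, :, :2])
    br = torch.min(a[:, None, 2:], b[None, :, 2:])
    inter = (br - tl).clamp(min=0).prod(-1)
    area_a = (a[:, 2:] - a[:, :2]).prod(-1)
    area_b = (b[:, 2:] - b[:, :2]).prod(-1)
    return inter / (area_a[:, None] + area_b[None, :] - inter)

def set_nms(bboxes, scores, roi_idx, iou_threshold=0.5):
    # Greedy NMS, except boxes sharing a source proposal never suppress each other.
    order = scores.argsort(descending=True)
    bboxes, scores, roi_idx = bboxes[order], scores[order], roi_idx[order]
    keep = torch.ones(len(bboxes), dtype=torch.bool)
    ruler = torch.arange(len(bboxes))
    while ruler.numel() > 0:
        basement, ruler = ruler[0], ruler[1:]
        ious = pairwise_iou(bboxes[basement].reshape(1, 4), bboxes[ruler])[0]
        # Suppress overlapping boxes unless they share the source proposal.
        suppress = (ious > iou_threshold) & (roi_idx[ruler] != roi_idx[basement])
        keep[ruler[suppress]] = False
        ruler = ruler[~suppress]
    return bboxes[keep], scores[keep]

boxes = torch.tensor([[0., 0., 10., 10.], [1., 1., 11., 11.], [20., 20., 30., 30.]])
scores = torch.tensor([0.9, 0.8, 0.7])
roi_idx = torch.tensor([0, 0, 1])   # first two boxes come from the same proposal
print(set_nms(boxes, scores, roi_idx)[0])  # both heavily overlapping boxes are kept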
see2023/Bert-VITS2-ext
train_ms.py
[ { "identifier": "config", "path": "config.py", "snippet": "class Resample_config:\nclass Preprocess_text_config:\nclass Bert_gen_config:\nclass Emo_gen_config:\nclass Train_ms_config:\nclass Webui_config:\nclass Server_config:\nclass Translate_config:\nclass Config:\n def __init__(self, in_dir: str, ...
import platform import os import torch import torch.distributed as dist import logging import argparse import datetime import gc import commons import utils from torch.nn import functional as F from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter from torch.nn.parallel import DistributedDataParallel as DDP from torch.cuda.amp import autocast, GradScaler from tqdm import tqdm from config import config from data_utils import ( TextAudioSpeakerLoader, TextAudioSpeakerCollate, DistributedBucketSampler, AudioVisemesLoader, ) from models import ( SynthesizerTrn, MultiPeriodDiscriminator, DurationDiscriminator, WavLMDiscriminator, VisemesNet, ) from losses import ( generator_loss, discriminator_loss, feature_loss, kl_loss, WavLMLoss, ) from mel_processing import mel_spectrogram_torch, spec_to_mel_torch from text.symbols import symbols
15,278
) else: train_and_evaluate( rank, local_rank, epoch, hps, [net_g, net_d, net_dur_disc, net_wd, wl], [optim_g, optim_d, optim_dur_disc, optim_wd], [scheduler_g, scheduler_d, scheduler_dur_disc, scheduler_wd], scaler, [train_loader, None], None, None, ) scheduler_g.step() scheduler_d.step() scheduler_wd.step() if net_dur_disc is not None: scheduler_dur_disc.step() def train_and_evaluate( rank, local_rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers, ): net_g, net_d, net_dur_disc, net_wd, wl = nets optim_g, optim_d, optim_dur_disc, optim_wd = optims scheduler_g, scheduler_d, scheduler_dur_disc, scheduler_wd = schedulers train_loader, eval_loader = loaders if writers is not None: writer, writer_eval = writers train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() net_d.train() net_wd.train() if net_dur_disc is not None: net_dur_disc.train() for batch_idx, ( x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert, ja_bert, en_bert, ) in enumerate(tqdm(train_loader)): if net_g.module.use_noise_scaled_mas: current_mas_noise_scale = ( net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step ) net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(local_rank, non_blocking=True), x_lengths.cuda( local_rank, non_blocking=True ) spec, spec_lengths = spec.cuda( local_rank, non_blocking=True ), spec_lengths.cuda(local_rank, non_blocking=True) y, y_lengths = y.cuda(local_rank, non_blocking=True), y_lengths.cuda( local_rank, non_blocking=True ) speakers = speakers.cuda(local_rank, non_blocking=True) tone = tone.cuda(local_rank, non_blocking=True) language = language.cuda(local_rank, non_blocking=True) bert = bert.cuda(local_rank, non_blocking=True) ja_bert = ja_bert.cuda(local_rank, non_blocking=True) en_bert = en_bert.cuda(local_rank, non_blocking=True) with autocast(enabled=hps.train.bf16_run, dtype=torch.bfloat16): ( y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_, logw_sdp), g, ) = net_g( x, x_lengths, spec, spec_lengths, speakers, tone, language, bert, ja_bert, en_bert, ) mel = spec_to_mel_torch( spec, hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.mel_fmin, hps.data.mel_fmax, ) y_mel = commons.slice_segments( mel, ids_slice, hps.train.segment_size // hps.data.hop_length )
# flake8: noqa: E402 logging.getLogger("numba").setLevel(logging.WARNING) logger = logging.getLogger(__name__) torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = ( True # If you encounter training problems, please try disabling TF32. ) torch.set_float32_matmul_precision("medium") torch.backends.cuda.sdp_kernel("flash") torch.backends.cuda.enable_flash_sdp(True) torch.backends.cuda.enable_mem_efficient_sdp( True ) # Not available if torch version is lower than 2.0 global_step = 0 global_visemes_step = 0 def run_only_visemes(hps): # Use the simplest single-machine mode: only train the parameters of the fully connected VisemesFCNet that maps the latent z to visemes global global_visemes_step torch.manual_seed(hps.train.seed) torch.cuda.set_device(0) train_dataset = AudioVisemesLoader(hps.data.training_visemes_files, hps.data) train_loader = DataLoader(train_dataset, num_workers=0, shuffle=False, pin_memory=True, batch_size=1, drop_last=True) eval_dataset = AudioVisemesLoader(hps.data.validation_visemes_files, hps.data) eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False, batch_size=1, pin_memory=True, drop_last=False) net_v = VisemesNet(hps.model.hidden_channels).cuda() latest_model_path = utils.latest_checkpoint_path(hps.model_dir, "V_*.pth") if latest_model_path is not None: _, optim_d, _, epoch_str = utils.load_checkpoint(latest_model_path, net_v, None, skip_optimizer=False) else : epoch_str = 1 global_visemes_step = 0 net_v.init_weights() optim_v = torch.optim.AdamW( net_v.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps) optim_v.param_groups[0]['initial_lr'] = hps.train.learning_rate scheduler_v = torch.optim.lr_scheduler.ExponentialLR(optim_v, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2, ) scaler = GradScaler(enabled=hps.train.bf16_run) for epoch in range(epoch_str, hps.train.epochs + 1): train_visemes_only(epoch, hps, net_v, train_loader, optim_v, scaler) scheduler_v.step() if epoch % hps.train.eval_interval == 0: eval_visemes_only(epoch, hps, net_v, eval_loader) utils.save_checkpoint(net_v, optim_v,hps.train.learning_rate , epoch, os.path.join(hps.model_dir, "V_{}.pth".format(epoch))) def train_visemes_only(epoch, hps, net_v, train_loader, optim_v, scaler): for batch_idx, (spec, visemes) in tqdm(enumerate(train_loader)): spec, visemes = spec.cuda(), visemes.cuda() with autocast(enabled=hps.train.bf16_run): # Generate visemes_hat from z via VisemesNet and compute the mean squared error visemes_hat = net_v(spec) visemes_hat_mse = get_visemes_mse(visemes, visemes_hat) optim_v.zero_grad() scaler.scale(visemes_hat_mse).backward() scaler.unscale_(optim_v) grad_norm_v = commons.clip_grad_value_(net_v.parameters(), None) scaler.step(optim_v) global global_visemes_step global_visemes_step += 1 if batch_idx % hps.train.log_interval == 0: print('Train Epoch: {} [{}/{} ({:.0f}%)]\tvisemes_hat_mse: {:.6f}\tgrad_norm_v: {:.6f}'.format( epoch, batch_idx * len(spec), len(train_loader.dataset), 100. 
* batch_idx / len(train_loader), visemes_hat_mse.item(), grad_norm_v)) def get_visemes_mse(visemes, visemes_hat): if visemes.shape[-1] != visemes_hat.shape[-1]: # If the last dimensions of y and x differ visemes_hat = F.interpolate(visemes_hat, size=visemes.shape[-1], mode='linear', align_corners=True) # Linearly interpolate x so its shape matches y visemes_hat_mse = torch.mean(torch.pow(visemes_hat - visemes, 2)) return visemes_hat_mse def eval_visemes_only(epoch, hps, net_v, eval_loader): net_v.eval() with torch.no_grad(): visemes_hat_mse_sum = 0.0 for batch_idx, (spec, visemes) in tqdm(enumerate(eval_loader)): spec, visemes = spec.cuda(), visemes.cuda() # Generate visemes_hat from z via VisemesFCNet and compute the mean squared error visemes_hat = net_v(spec) visemes_hat_mse = get_visemes_mse(visemes, visemes_hat) visemes_hat_mse_sum += visemes_hat_mse # print('visemes_hat_mse', visemes_hat_mse) break visemes_hat_mse_avg = visemes_hat_mse_sum / (batch_idx + 1) log_str = '------------------ eval epoch: {} visemes_hat_mse_avg: {:.6f}'.format(epoch, visemes_hat_mse_avg) print(log_str) logger.warning(log_str) net_v.train() def run(): # Parse environment variables envs = config.train_ms_config.env for env_name, env_value in envs.items(): if env_name not in os.environ.keys(): print("Loading configuration value {} from config".format(str(env_value))) os.environ[env_name] = str(env_value) print( "Loaded environment variables \nMASTER_ADDR: {},\nMASTER_PORT: {},\nWORLD_SIZE: {},\nRANK: {},\nLOCAL_RANK: {}".format( os.environ["MASTER_ADDR"], os.environ["MASTER_PORT"], os.environ["WORLD_SIZE"], os.environ["RANK"], os.environ["LOCAL_RANK"], ) ) backend = "nccl" if platform.system() == "Windows": backend = "gloo" # If Windows, switch to gloo backend. dist.init_process_group( backend=backend, init_method="env://", timeout=datetime.timedelta(seconds=300), ) # Use torchrun instead of mp.spawn rank = dist.get_rank() local_rank = int(os.environ["LOCAL_RANK"]) n_gpus = dist.get_world_size() # Parse command line / config.yml configuration # hps = utils.get_hparams() parser = argparse.ArgumentParser() # Command-line configuration is not recommended unless necessary; please use the config.yml file parser.add_argument( "-c", "--config", type=str, default=config.train_ms_config.config_path, help="JSON file for configuration", ) parser.add_argument( "-m", "--model", type=str, help="Dataset folder path. Note that data is no longer placed under the /logs folder by default. To configure it via the command line, declare the path relative to the root directory", default=config.dataset_path, ) parser.add_argument('--visemes', dest='visemes', action="store_true", default=False, help="train visemes only, lock the encoder and decoder") args = parser.parse_args() model_dir = os.path.join(args.model, config.train_ms_config.model) if not os.path.exists(model_dir): os.makedirs(model_dir) hps = utils.get_hparams_from_file(args.config) hps.model_dir = model_dir set_logger(hps) if args.visemes: run_only_visemes(hps) # Check whether the paths are the same if os.path.realpath(args.config) != os.path.realpath( config.train_ms_config.config_path ): with open(args.config, "r", encoding="utf-8") as f: data = f.read() with open(config.train_ms_config.config_path, "w", encoding="utf-8") as f: f.write(data) torch.manual_seed(hps.train.seed) torch.cuda.set_device(local_rank) global global_step if rank == 0: logger = utils.get_logger(hps.model_dir) logger.info(hps) utils.check_git_hash(hps.model_dir) writer = SummaryWriter(log_dir=hps.model_dir) writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) train_sampler = DistributedBucketSampler( train_dataset, hps.train.batch_size, [32, 300, 400, 500, 600, 700, 800, 900, 1000], num_replicas=n_gpus, rank=rank, shuffle=True, ) collate_fn = TextAudioSpeakerCollate() train_loader = DataLoader( train_dataset, 
num_workers=min(config.train_ms_config.num_workers, os.cpu_count() - 1), shuffle=False, pin_memory=True, collate_fn=collate_fn, batch_sampler=train_sampler, persistent_workers=True, prefetch_factor=4, ) # DataLoader config could be adjusted. if rank == 0: eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) eval_loader = DataLoader( eval_dataset, num_workers=0, shuffle=False, batch_size=1, pin_memory=True, drop_last=False, collate_fn=collate_fn, ) if ( "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas is True ): print("Using noise scaled MAS for VITS2") mas_noise_scale_initial = 0.01 noise_scale_delta = 2e-6 else: print("Using normal MAS for VITS1") mas_noise_scale_initial = 0.0 noise_scale_delta = 0.0 if ( "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator is True ): print("Using duration discriminator for VITS2") net_dur_disc = DurationDiscriminator( hps.model.hidden_channels, hps.model.hidden_channels, 3, 0.1, gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, ).cuda(local_rank) else: net_dur_disc = None if ( "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder is True ): if hps.data.n_speakers == 0: raise ValueError( "n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model" ) else: print("Using normal encoder for VITS1") net_g = SynthesizerTrn( len(symbols), hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, mas_noise_scale_initial=mas_noise_scale_initial, noise_scale_delta=noise_scale_delta, **hps.model, ).cuda(local_rank) if getattr(hps.train, "freeze_ZH_bert", False): print("Freezing ZH bert encoder !!!") for param in net_g.enc_p.bert_proj.parameters(): param.requires_grad = False if getattr(hps.train, "freeze_EN_bert", False): print("Freezing EN bert encoder !!!") for param in net_g.enc_p.en_bert_proj.parameters(): param.requires_grad = False if getattr(hps.train, "freeze_JP_bert", False): print("Freezing JP bert encoder !!!") for param in net_g.enc_p.ja_bert_proj.parameters(): param.requires_grad = False net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(local_rank) net_wd = WavLMDiscriminator( hps.model.slm.hidden, hps.model.slm.nlayers, hps.model.slm.initial_channel ).cuda(local_rank) optim_g = torch.optim.AdamW( filter(lambda p: p.requires_grad, net_g.parameters()), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) optim_d = torch.optim.AdamW( net_d.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) optim_wd = torch.optim.AdamW( net_wd.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) if net_dur_disc is not None: optim_dur_disc = torch.optim.AdamW( net_dur_disc.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) else: optim_dur_disc = None net_g = DDP(net_g, device_ids=[local_rank], bucket_cap_mb=512) net_d = DDP(net_d, device_ids=[local_rank], bucket_cap_mb=512) net_wd = DDP(net_wd, device_ids=[local_rank], bucket_cap_mb=512) if net_dur_disc is not None: net_dur_disc = DDP( net_dur_disc, device_ids=[local_rank], bucket_cap_mb=512, ) # Download the base model if config.train_ms_config.base["use_base_model"]: utils.download_checkpoint( hps.model_dir, config.train_ms_config.base, token=config.openi_token, mirror=config.mirror, ) dur_resume_lr = hps.train.learning_rate wd_resume_lr = hps.train.learning_rate if net_dur_disc is not
None: try: _, _, dur_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "DUR_*.pth"), net_dur_disc, optim_dur_disc, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) if not optim_dur_disc.param_groups[0].get("initial_lr"): optim_dur_disc.param_groups[0]["initial_lr"] = dur_resume_lr except Exception: print("Initialize dur_disc") try: _, optim_g, g_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) _, optim_d, d_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) if not optim_g.param_groups[0].get("initial_lr"): optim_g.param_groups[0]["initial_lr"] = g_resume_lr if not optim_d.param_groups[0].get("initial_lr"): optim_d.param_groups[0]["initial_lr"] = d_resume_lr epoch_str = max(epoch_str, 1) # global_step = (epoch_str - 1) * len(train_loader) global_step = int( utils.get_steps(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth")) ) print( f"******************Found an existing model: epoch {epoch_str}, global step {global_step}*********************" ) except Exception as e: print(e) epoch_str = 1 global_step = 0 try: _, optim_wd, wd_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "WD_*.pth"), net_wd, optim_wd, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) if not optim_wd.param_groups[0].get("initial_lr"): optim_wd.param_groups[0]["initial_lr"] = wd_resume_lr except Exception as e: print(e) scheduler_g = torch.optim.lr_scheduler.ExponentialLR( optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) scheduler_d = torch.optim.lr_scheduler.ExponentialLR( optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) scheduler_wd = torch.optim.lr_scheduler.ExponentialLR( optim_wd, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) if net_dur_disc is not None: scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR( optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) else: scheduler_dur_disc = None scaler = GradScaler(enabled=hps.train.bf16_run) wl = WavLMLoss( hps.model.slm.model, net_wd, hps.data.sampling_rate, hps.model.slm.sr, ).to(local_rank) for epoch in range(epoch_str, hps.train.epochs + 1): if rank == 0: train_and_evaluate( rank, local_rank, epoch, hps, [net_g, net_d, net_dur_disc, net_wd, wl], [optim_g, optim_d, optim_dur_disc, optim_wd], [scheduler_g, scheduler_d, scheduler_dur_disc, scheduler_wd], scaler, [train_loader, eval_loader], logger, [writer, writer_eval], ) else: train_and_evaluate( rank, local_rank, epoch, hps, [net_g, net_d, net_dur_disc, net_wd, wl], [optim_g, optim_d, optim_dur_disc, optim_wd], [scheduler_g, scheduler_d, scheduler_dur_disc, scheduler_wd], scaler, [train_loader, None], None, None, ) scheduler_g.step() scheduler_d.step() scheduler_wd.step() if net_dur_disc is not None: scheduler_dur_disc.step() def train_and_evaluate( rank, local_rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers, ): net_g, net_d, net_dur_disc, net_wd, wl = nets optim_g, optim_d, optim_dur_disc, optim_wd = optims scheduler_g, scheduler_d, scheduler_dur_disc, scheduler_wd = schedulers train_loader, eval_loader = loaders if writers is not None: writer, writer_eval = writers
train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() net_d.train() net_wd.train() if net_dur_disc is not None: net_dur_disc.train() for batch_idx, ( x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert, ja_bert, en_bert, ) in enumerate(tqdm(train_loader)): if net_g.module.use_noise_scaled_mas: current_mas_noise_scale = ( net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step ) net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(local_rank, non_blocking=True), x_lengths.cuda( local_rank, non_blocking=True ) spec, spec_lengths = spec.cuda( local_rank, non_blocking=True ), spec_lengths.cuda(local_rank, non_blocking=True) y, y_lengths = y.cuda(local_rank, non_blocking=True), y_lengths.cuda( local_rank, non_blocking=True ) speakers = speakers.cuda(local_rank, non_blocking=True) tone = tone.cuda(local_rank, non_blocking=True) language = language.cuda(local_rank, non_blocking=True) bert = bert.cuda(local_rank, non_blocking=True) ja_bert = ja_bert.cuda(local_rank, non_blocking=True) en_bert = en_bert.cuda(local_rank, non_blocking=True) with autocast(enabled=hps.train.bf16_run, dtype=torch.bfloat16): ( y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_, logw_sdp), g, ) = net_g( x, x_lengths, spec, spec_lengths, speakers, tone, language, bert, ja_bert, en_bert, ) mel = spec_to_mel_torch( spec, hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.mel_fmin, hps.data.mel_fmax, ) y_mel = commons.slice_segments( mel, ids_slice, hps.train.segment_size // hps.data.hop_length )
y_hat_mel = mel_spectrogram_torch(
15
2023-12-27 03:09:11+00:00
24k
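The gold `next_line` of the record above begins the mel-spectrogram reconstruction term of the generator loss. Below is a minimal sketch of that step, with torchaudio standing in for the repo's `mel_spectrogram_torch` helper; the sample rate, STFT sizes, and the 45.0 mel-loss weight are assumptions for illustration, not values read from this config.

```python
# Hedged sketch: an L1 mel loss between sliced ground-truth mels and mels
# computed from generated audio, mirroring y_mel / y_hat_mel above.
import torch
import torch.nn.functional as F
import torchaudio

hop_length, n_mels, segment_size = 256, 128, 16384           # assumed values
mel_fn = torchaudio.transforms.MelSpectrogram(
    sample_rate=44100, n_fft=2048, win_length=2048,
    hop_length=hop_length, n_mels=n_mels,
)

y_hat = torch.randn(4, 1, segment_size)                      # generated slices (B, 1, T)
y_mel = torch.randn(4, n_mels, segment_size // hop_length)   # ground-truth mel slices

y_hat_mel = mel_fn(y_hat.squeeze(1))[..., : y_mel.size(-1)]  # align frame counts
loss_mel = F.l1_loss(y_mel, y_hat_mel) * 45.0                # assumed c_mel weight, VITS-style
print(loss_mel.item())
```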
chinhsuanwu/ifusion-threestudio
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Flo...
import os import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import threestudio import trimesh from dataclasses import dataclass, field from threestudio.models.geometry.base import ( BaseExplicitGeometry, BaseGeometry, contract_to_unisphere, ) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.misc import broadcast from threestudio.utils.ops import scale_tensor from threestudio.utils.typing import * from pysdf import SDF
15,498
"+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative sign here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None shape_init_mesh_up: str = "+z" shape_init_mesh_front: str = "+x" force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: if self.cfg.shape_init is None and not self.cfg.force_shape_init: return # do not initialize shape if weights are provided if self.cfg.weights is not None and not self.cfg.force_shape_init: return get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]] assert isinstance(self.cfg.shape_init, str) if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise 
ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative sign here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not
other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
elif isinstance(other, ImplicitVolume):
4
2023-12-27 20:30:33+00:00
24k
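The `shape_init` mesh branch above seeds the SDF grid from a mesh via pysdf, negating the distances because pysdf reports positive values inside the shape. A self-contained sketch of that initialization, assuming pysdf and trimesh are installed and using a hypothetical `mesh.obj`:

```python
import numpy as np
import torch
import trimesh
from pysdf import SDF

mesh = trimesh.load("mesh.obj")                              # hypothetical watertight mesh
mesh.vertices = mesh.vertices - mesh.vertices.mean(0)        # move to center
mesh.vertices = mesh.vertices / np.abs(mesh.vertices).max()  # fit into the unit cube

sdf = SDF(mesh.vertices, mesh.faces)
points = np.random.uniform(-1.0, 1.0, size=(1024, 3)).astype(np.float32)
# pysdf returns positive distances inside the shape, so negate to get the
# usual convention (negative inside) expected by the marching-tetrahedra helper.
sdf_gt = torch.from_numpy(-sdf(points))[..., None]           # shape (1024, 1)
print(sdf_gt.min().item(), sdf_gt.max().item())
```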
open-mmlab/Amphion
modules/wenet_extractor/transformer/encoder.py
[ { "identifier": "MultiHeadedAttention", "path": "modules/wenet_extractor/transformer/attention.py", "snippet": "class MultiHeadedAttention(nn.Module):\n \"\"\"Multi-Head Attention layer.\n\n Args:\n n_head (int): The number of heads.\n n_feat (int): The number of features.\n d...
from typing import Tuple from modules.wenet_extractor.transformer.attention import MultiHeadedAttention from modules.wenet_extractor.transformer.attention import ( RelPositionMultiHeadedAttention, ) from modules.wenet_extractor.transformer.convolution import ConvolutionModule from modules.wenet_extractor.transformer.embedding import PositionalEncoding from modules.wenet_extractor.transformer.embedding import RelPositionalEncoding from modules.wenet_extractor.transformer.embedding import NoPositionalEncoding from modules.wenet_extractor.transformer.encoder_layer import TransformerEncoderLayer from modules.wenet_extractor.transformer.encoder_layer import ConformerEncoderLayer from modules.wenet_extractor.transformer.positionwise_feed_forward import ( PositionwiseFeedForward, ) from modules.wenet_extractor.transformer.subsampling import Conv2dSubsampling4 from modules.wenet_extractor.transformer.subsampling import Conv2dSubsampling6 from modules.wenet_extractor.transformer.subsampling import Conv2dSubsampling8 from modules.wenet_extractor.transformer.subsampling import LinearNoSubsampling from modules.wenet_extractor.utils.common import get_activation from modules.wenet_extractor.utils.mask import make_pad_mask from modules.wenet_extractor.utils.mask import add_optional_chunk_mask import torch
15,865
if self.normalize_before: xs = self.after_norm(xs) # NOTE(xcsong): shape(r_att_cache) is (elayers, head, ?, d_k * 2), # ? may be larger than cache_t1, it depends on required_cache_size r_att_cache = torch.cat(r_att_cache, dim=0) # NOTE(xcsong): shape(r_cnn_cache) is (e, b=1, hidden-dim, cache_t2) r_cnn_cache = torch.cat(r_cnn_cache, dim=0) return (xs, r_att_cache, r_cnn_cache) def forward_chunk_by_chunk( self, xs: torch.Tensor, decoding_chunk_size: int, num_decoding_left_chunks: int = -1, ) -> Tuple[torch.Tensor, torch.Tensor]: """Forward input chunk by chunk with chunk_size in a streaming fashion Here we should pay special attention to computation cache in the streaming style forward chunk by chunk. Three things should be taken into account for computation in the current network: 1. transformer/conformer encoder layers output cache 2. convolution in conformer 3. convolution in subsampling However, we don't implement subsampling cache for: 1. We can control the subsampling module to output the right result by overlapping input instead of caching left context; this wastes some computation, but subsampling only takes a very small fraction of computation in the whole model. 2. Typically, there are several convolution layers with subsampling in the subsampling module; it is tricky and complicated to do caching with different convolution layers with different subsampling rates. 3. Currently, nn.Sequential is used to stack all the convolution layers in subsampling; we would need to rewrite it to make it work with a cache, which is not preferred. Args: xs (torch.Tensor): (1, max_len, dim) chunk_size (int): decoding chunk size """ assert decoding_chunk_size > 0 # The model is trained by static or dynamic chunk assert self.static_chunk_size > 0 or self.use_dynamic_chunk subsampling = self.embed.subsampling_rate context = self.embed.right_context + 1 # Add current frame stride = subsampling * decoding_chunk_size decoding_window = (decoding_chunk_size - 1) * subsampling + context num_frames = xs.size(1) att_cache: torch.Tensor = torch.zeros((0, 0, 0, 0), device=xs.device) cnn_cache: torch.Tensor = torch.zeros((0, 0, 0, 0), device=xs.device) outputs = [] offset = 0 required_cache_size = decoding_chunk_size * num_decoding_left_chunks # Feed forward overlap input step by step for cur in range(0, num_frames - context + 1, stride): end = min(cur + decoding_window, num_frames) chunk_xs = xs[:, cur:end, :] (y, att_cache, cnn_cache) = self.forward_chunk( chunk_xs, offset, required_cache_size, att_cache, cnn_cache ) outputs.append(y) offset += y.size(1) ys = torch.cat(outputs, 1) masks = torch.ones((1, 1, ys.size(1)), device=ys.device, dtype=torch.bool) return ys, masks class TransformerEncoder(BaseEncoder): """Transformer encoder module.""" def __init__( self, input_size: int, output_size: int = 256, attention_heads: int = 4, linear_units: int = 2048, num_blocks: int = 6, dropout_rate: float = 0.1, positional_dropout_rate: float = 0.1, attention_dropout_rate: float = 0.0, input_layer: str = "conv2d", pos_enc_layer_type: str = "abs_pos", normalize_before: bool = True, static_chunk_size: int = 0, use_dynamic_chunk: bool = False, global_cmvn: torch.nn.Module = None, use_dynamic_left_chunk: bool = False, ): """Construct TransformerEncoder See Encoder for the meaning of each parameter.
""" super().__init__( input_size, output_size, attention_heads, linear_units, num_blocks, dropout_rate, positional_dropout_rate, attention_dropout_rate, input_layer, pos_enc_layer_type, normalize_before, static_chunk_size, use_dynamic_chunk, global_cmvn, use_dynamic_left_chunk, ) self.encoders = torch.nn.ModuleList( [ TransformerEncoderLayer( output_size, MultiHeadedAttention( attention_heads, output_size, attention_dropout_rate ),
# This module is from [WeNet](https://github.com/wenet-e2e/wenet). # ## Citations # ```bibtex # @inproceedings{yao2021wenet, # title={WeNet: Production oriented Streaming and Non-streaming End-to-End Speech Recognition Toolkit}, # author={Yao, Zhuoyuan and Wu, Di and Wang, Xiong and Zhang, Binbin and Yu, Fan and Yang, Chao and Peng, Zhendong and Chen, Xiaoyu and Xie, Lei and Lei, Xin}, # booktitle={Proc. Interspeech}, # year={2021}, # address={Brno, Czech Republic }, # organization={IEEE} # } # @article{zhang2022wenet, # title={WeNet 2.0: More Productive End-to-End Speech Recognition Toolkit}, # author={Zhang, Binbin and Wu, Di and Peng, Zhendong and Song, Xingchen and Yao, Zhuoyuan and Lv, Hang and Xie, Lei and Yang, Chao and Pan, Fuping and Niu, Jianwei}, # journal={arXiv preprint arXiv:2203.15455}, # year={2022} # } # """Encoder definition.""" class BaseEncoder(torch.nn.Module): def __init__( self, input_size: int, output_size: int = 256, attention_heads: int = 4, linear_units: int = 2048, num_blocks: int = 6, dropout_rate: float = 0.1, positional_dropout_rate: float = 0.1, attention_dropout_rate: float = 0.0, input_layer: str = "conv2d", pos_enc_layer_type: str = "abs_pos", normalize_before: bool = True, static_chunk_size: int = 0, use_dynamic_chunk: bool = False, global_cmvn: torch.nn.Module = None, use_dynamic_left_chunk: bool = False, ): """ Args: input_size (int): input dim output_size (int): dimension of attention attention_heads (int): the number of heads of multi-head attention linear_units (int): the number of hidden units of the position-wise feed forward num_blocks (int): the number of decoder blocks dropout_rate (float): dropout rate attention_dropout_rate (float): dropout rate in attention positional_dropout_rate (float): dropout rate after adding positional encoding input_layer (str): input layer type. optional [linear, conv2d, conv2d6, conv2d8] pos_enc_layer_type (str): Encoder positional encoding layer type. optional [abs_pos, scaled_abs_pos, rel_pos, no_pos] normalize_before (bool): True: use layer_norm before each sub-block of a layer. False: use layer_norm after each sub-block of a layer.
static_chunk_size (int): chunk size for static chunk training and decoding use_dynamic_chunk (bool): whether to use dynamic chunk size for training or not. You can only use a fixed chunk (chunk_size > 0) or a dynamic chunk size (use_dynamic_chunk = True) global_cmvn (Optional[torch.nn.Module]): Optional GlobalCMVN module use_dynamic_left_chunk (bool): whether to use dynamic left chunk in dynamic chunk training """ super().__init__() self._output_size = output_size if pos_enc_layer_type == "abs_pos": pos_enc_class = PositionalEncoding elif pos_enc_layer_type == "rel_pos": pos_enc_class = RelPositionalEncoding elif pos_enc_layer_type == "no_pos": pos_enc_class = NoPositionalEncoding else: raise ValueError("unknown pos_enc_layer: " + pos_enc_layer_type) if input_layer == "linear": subsampling_class = LinearNoSubsampling elif input_layer == "conv2d": subsampling_class = Conv2dSubsampling4 elif input_layer == "conv2d6": subsampling_class = Conv2dSubsampling6 elif input_layer == "conv2d8": subsampling_class = Conv2dSubsampling8 else: raise ValueError("unknown input_layer: " + input_layer) self.global_cmvn = global_cmvn self.embed = subsampling_class( input_size, output_size, dropout_rate, pos_enc_class(output_size, positional_dropout_rate), ) self.normalize_before = normalize_before self.after_norm = torch.nn.LayerNorm(output_size, eps=1e-5) self.static_chunk_size = static_chunk_size self.use_dynamic_chunk = use_dynamic_chunk self.use_dynamic_left_chunk = use_dynamic_left_chunk def output_size(self) -> int: return self._output_size def forward( self, xs: torch.Tensor, xs_lens: torch.Tensor, decoding_chunk_size: int = 0, num_decoding_left_chunks: int = -1, ) -> Tuple[torch.Tensor, torch.Tensor]: """Embed positions in tensor. Args: xs: padded input tensor (B, T, D) xs_lens: input length (B) decoding_chunk_size: decoding chunk size for dynamic chunk 0: default for training, use random dynamic chunk. <0: for decoding, use full chunk. >0: for decoding, use fixed chunk size as set. num_decoding_left_chunks: number of left chunks, this is for decoding, the chunk size is decoding_chunk_size.
>=0: use num_decoding_left_chunks <0: use all left chunks Returns: encoder output tensor xs, and subsampled masks xs: padded output tensor (B, T' ~= T/subsample_rate, D) masks: torch.Tensor batch padding mask after subsample (B, 1, T' ~= T/subsample_rate) """ T = xs.size(1) masks = ~make_pad_mask(xs_lens, T).unsqueeze(1) # (B, 1, T) if self.global_cmvn is not None: xs = self.global_cmvn(xs) xs, pos_emb, masks = self.embed(xs, masks) mask_pad = masks # (B, 1, T/subsample_rate) chunk_masks = add_optional_chunk_mask( xs, masks, self.use_dynamic_chunk, self.use_dynamic_left_chunk, decoding_chunk_size, self.static_chunk_size, num_decoding_left_chunks, ) for layer in self.encoders: xs, chunk_masks, _, _ = layer(xs, chunk_masks, pos_emb, mask_pad) if self.normalize_before: xs = self.after_norm(xs) # Here we assume the mask is not changed in encoder layers, so just # return the masks before encoder layers, and the masks will be used # for cross attention with decoder later return xs, masks def forward_chunk( self, xs: torch.Tensor, offset: int, required_cache_size: int, att_cache: torch.Tensor = torch.zeros(0, 0, 0, 0), cnn_cache: torch.Tensor = torch.zeros(0, 0, 0, 0), att_mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool), ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Forward just one chunk Args: xs (torch.Tensor): chunk input, with shape (b=1, time, mel-dim), where `time == (chunk_size - 1) * subsample_rate + \ subsample.right_context + 1` offset (int): current offset in encoder output time stamp required_cache_size (int): cache size required for next chunk computation >=0: actual cache size <0: means all history cache is required att_cache (torch.Tensor): cache tensor for KEY & VALUE in transformer/conformer attention, with shape (elayers, head, cache_t1, d_k * 2), where `head * d_k == hidden-dim` and `cache_t1 == chunk_size * num_decoding_left_chunks`. cnn_cache (torch.Tensor): cache tensor for cnn_module in conformer, (elayers, b=1, hidden-dim, cache_t2), where `cache_t2 == cnn.lorder - 1` Returns: torch.Tensor: output of current input xs, with shape (b=1, chunk_size, hidden-dim). torch.Tensor: new attention cache required for next chunk, with dynamic shape (elayers, head, ?, d_k * 2) depending on required_cache_size. torch.Tensor: new conformer cnn cache required for next chunk, with same shape as the original cnn_cache.
""" assert xs.size(0) == 1 # tmp_masks is just for interface compatibility tmp_masks = torch.ones(1, xs.size(1), device=xs.device, dtype=torch.bool) tmp_masks = tmp_masks.unsqueeze(1) if self.global_cmvn is not None: xs = self.global_cmvn(xs) # NOTE(xcsong): Before embed, shape(xs) is (b=1, time, mel-dim) xs, pos_emb, _ = self.embed(xs, tmp_masks, offset) # NOTE(xcsong): After embed, shape(xs) is (b=1, chunk_size, hidden-dim) elayers, cache_t1 = att_cache.size(0), att_cache.size(2) chunk_size = xs.size(1) attention_key_size = cache_t1 + chunk_size pos_emb = self.embed.position_encoding( offset=offset - cache_t1, size=attention_key_size ) if required_cache_size < 0: next_cache_start = 0 elif required_cache_size == 0: next_cache_start = attention_key_size else: next_cache_start = max(attention_key_size - required_cache_size, 0) r_att_cache = [] r_cnn_cache = [] for i, layer in enumerate(self.encoders): # NOTE(xcsong): Before layer.forward # shape(att_cache[i:i + 1]) is (1, head, cache_t1, d_k * 2), # shape(cnn_cache[i]) is (b=1, hidden-dim, cache_t2) xs, _, new_att_cache, new_cnn_cache = layer( xs, att_mask, pos_emb, att_cache=att_cache[i : i + 1] if elayers > 0 else att_cache, cnn_cache=cnn_cache[i] if cnn_cache.size(0) > 0 else cnn_cache, ) # NOTE(xcsong): After layer.forward # shape(new_att_cache) is (1, head, attention_key_size, d_k * 2), # shape(new_cnn_cache) is (b=1, hidden-dim, cache_t2) r_att_cache.append(new_att_cache[:, :, next_cache_start:, :]) r_cnn_cache.append(new_cnn_cache.unsqueeze(0)) if self.normalize_before: xs = self.after_norm(xs) # NOTE(xcsong): shape(r_att_cache) is (elayers, head, ?, d_k * 2), # ? may be larger than cache_t1, it depends on required_cache_size r_att_cache = torch.cat(r_att_cache, dim=0) # NOTE(xcsong): shape(r_cnn_cache) is (e, b=1, hidden-dim, cache_t2) r_cnn_cache = torch.cat(r_cnn_cache, dim=0) return (xs, r_att_cache, r_cnn_cache) def forward_chunk_by_chunk( self, xs: torch.Tensor, decoding_chunk_size: int, num_decoding_left_chunks: int = -1, ) -> Tuple[torch.Tensor, torch.Tensor]: """Forward input chunk by chunk with chunk_size in a streaming fashion Here we should pay special attention to computation cache in the streaming style forward chunk by chunk. Three things should be taken into account for computation in the current network: 1. transformer/conformer encoder layers output cache 2. convolution in conformer 3. convolution in subsampling However, we don't implement subsampling cache for: 1. We can control the subsampling module to output the right result by overlapping input instead of caching left context; this wastes some computation, but subsampling only takes a very small fraction of computation in the whole model. 2. Typically, there are several convolution layers with subsampling in the subsampling module; it is tricky and complicated to do caching with different convolution layers with different subsampling rates. 3. Currently, nn.Sequential is used to stack all the convolution layers in subsampling; we would need to rewrite it to make it work with a cache, which is not preferred.
Args: xs (torch.Tensor): (1, max_len, dim) chunk_size (int): decoding chunk size """ assert decoding_chunk_size > 0 # The model is trained by static or dynamic chunk assert self.static_chunk_size > 0 or self.use_dynamic_chunk subsampling = self.embed.subsampling_rate context = self.embed.right_context + 1 # Add current frame stride = subsampling * decoding_chunk_size decoding_window = (decoding_chunk_size - 1) * subsampling + context num_frames = xs.size(1) att_cache: torch.Tensor = torch.zeros((0, 0, 0, 0), device=xs.device) cnn_cache: torch.Tensor = torch.zeros((0, 0, 0, 0), device=xs.device) outputs = [] offset = 0 required_cache_size = decoding_chunk_size * num_decoding_left_chunks # Feed forward overlap input step by step for cur in range(0, num_frames - context + 1, stride): end = min(cur + decoding_window, num_frames) chunk_xs = xs[:, cur:end, :] (y, att_cache, cnn_cache) = self.forward_chunk( chunk_xs, offset, required_cache_size, att_cache, cnn_cache ) outputs.append(y) offset += y.size(1) ys = torch.cat(outputs, 1) masks = torch.ones((1, 1, ys.size(1)), device=ys.device, dtype=torch.bool) return ys, masks class TransformerEncoder(BaseEncoder): """Transformer encoder module.""" def __init__( self, input_size: int, output_size: int = 256, attention_heads: int = 4, linear_units: int = 2048, num_blocks: int = 6, dropout_rate: float = 0.1, positional_dropout_rate: float = 0.1, attention_dropout_rate: float = 0.0, input_layer: str = "conv2d", pos_enc_layer_type: str = "abs_pos", normalize_before: bool = True, static_chunk_size: int = 0, use_dynamic_chunk: bool = False, global_cmvn: torch.nn.Module = None, use_dynamic_left_chunk: bool = False, ): """Construct TransformerEncoder See Encoder for the meaning of each parameter. """ super().__init__( input_size, output_size, attention_heads, linear_units, num_blocks, dropout_rate, positional_dropout_rate, attention_dropout_rate, input_layer, pos_enc_layer_type, normalize_before, static_chunk_size, use_dynamic_chunk, global_cmvn, use_dynamic_left_chunk, ) self.encoders = torch.nn.ModuleList( [ TransformerEncoderLayer( output_size, MultiHeadedAttention( attention_heads, output_size, attention_dropout_rate ),
PositionwiseFeedForward(output_size, linear_units, dropout_rate),
8
2023-11-15 09:19:27+00:00
24k
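The `forward_chunk_by_chunk` method above turns the encoder into a streaming one by sliding an overlapping window over the input frames. A back-of-the-envelope sketch of its window arithmetic; the subsampling rate (4) and right context (6) are assumptions in the spirit of a Conv2dSubsampling4 front end, not values read from a model:

```python
subsampling, right_context = 4, 6           # assumed front-end properties
decoding_chunk_size = 16                    # encoder outputs produced per step

context = right_context + 1                 # add the current frame
stride = subsampling * decoding_chunk_size  # input frames consumed per step
decoding_window = (decoding_chunk_size - 1) * subsampling + context

num_frames = 200
for cur in range(0, num_frames - context + 1, stride):
    end = min(cur + decoding_window, num_frames)
    print(f"feed frames [{cur}:{end}) -> up to {decoding_chunk_size} encoder outputs")
```

Consecutive windows overlap by `context - subsampling` frames, which is exactly the recomputation the docstring accepts in exchange for not caching the subsampling convolutions.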
BobaZooba/xllm
tests/unit/core/test_dependencies.py
[ { "identifier": "LMCollator", "path": "src/xllm/collators/lm.py", "snippet": "class LMCollator(BaseCollator):\n \"\"\"\n `LMCollator` is a data collator class specifically designed to prepare batches of data for language modeling tasks.\n Extending the `BaseCollator`, it adapts the general data...
import pytest from peft import PeftModel from pytest import MonkeyPatch from torch import Tensor from transformers import ( BitsAndBytesConfig, GPTQConfig, PreTrainedTokenizer, TrainingArguments, ) from src.xllm.collators.lm import LMCollator from src.xllm.collators.registry import collators_registry from src.xllm.core.config import Config from src.xllm.core.dependencies import ( build_collator, build_dataset, build_model, build_quantization_config, build_tokenizer, build_trainer, build_training_arguments, ) from src.xllm.datasets.registry import datasets_registry from src.xllm.datasets.soda import SodaDataset from src.xllm.trainers.registry import trainers_registry from tests.helpers.constants import LLAMA_TOKENIZER_DIR from tests.helpers.dummy_data import DATA, DummyDataset from tests.helpers.patches import patch_from_pretrained_auto_causal_lm
19,271
def test_build_training_arguments(config: Config): arguments = build_training_arguments(config=config) assert arguments.per_device_train_batch_size == config.per_device_train_batch_size assert arguments.deepspeed is None def test_build_dataset_train(path_to_train_dummy_data: str): datasets_registry.add(key="dummy", value=DummyDataset) config = Config(dataset_key="dummy", train_local_path_to_data=path_to_train_dummy_data) dataset = build_dataset(config=config, is_train=True) assert dataset[0] is not None def test_build_dataset_eval(path_to_train_dummy_data: str): datasets_registry.add(key="dummy1", value=DummyDataset) config = Config(dataset_key="dummy1", eval_local_path_to_data=path_to_train_dummy_data) dataset = build_dataset(config=config, is_train=False) assert dataset[0] is not None def test_build_dataset_eval_none(path_to_train_dummy_data: str): datasets_registry.add(key="dummy2", value=DummyDataset) config = Config( dataset_key="dummy2", train_local_path_to_data=path_to_train_dummy_data, eval_local_path_to_data=None, ) dataset = build_dataset(config=config, is_train=False) assert dataset is None def test_build_dataset_exception(path_to_train_dummy_data: str): datasets_registry.add(key="exc", value=Config) config = Config(dataset_key="exc", train_local_path_to_data=path_to_train_dummy_data) with pytest.raises(ValueError): build_dataset(config=config, is_train=True) def test_build_tokenizer(): config = Config(tokenizer_name_or_path=LLAMA_TOKENIZER_DIR) tokenizer = build_tokenizer(config=config) tokenizer("hello") def test_build_tokenizer_use_fast(): config = Config(tokenizer_name_or_path=LLAMA_TOKENIZER_DIR) tokenizer = build_tokenizer(config=config, use_fast=False) tokenizer("hello") def test_build_tokenizer_padding_size(): config = Config(tokenizer_name_or_path=LLAMA_TOKENIZER_DIR, tokenizer_padding_side="right") tokenizer = build_tokenizer(config=config) tokenizer("hello") def test_build_collator(config: Config, llama_tokenizer: PreTrainedTokenizer): collator = build_collator(config=config, tokenizer=llama_tokenizer) batch = collator(DATA) for value in batch.values(): assert isinstance(value, Tensor) def test_build_collator_exception(llama_tokenizer: PreTrainedTokenizer): collators_registry.add(key="exc", value=Config) config = Config(collator_key="exc") with pytest.raises(ValueError): _ = build_collator(config=config, tokenizer=llama_tokenizer) def test_build_quantization_config_bnb(): config = Config(load_in_8bit=True) quantization_config = build_quantization_config(config=config) assert isinstance(quantization_config, BitsAndBytesConfig) assert quantization_config.load_in_8bit def test_build_quantization_config_gptq(): config = Config(gptq_bits=4, gptq_group_size=128, from_gptq=True) quantization_config = build_quantization_config(config=config) assert isinstance(quantization_config, GPTQConfig) assert quantization_config.bits == 4 assert quantization_config.group_size == 128 def test_build_quantization_config_none(): config = Config(from_gptq=False, load_in_4bit=False, load_in_8bit=False) quantization_config = build_quantization_config(config=config) assert quantization_config is None @pytest.mark.parametrize("apply_lora", [False, True]) def test_build_model(monkeypatch: MonkeyPatch, apply_lora: bool): config = Config(apply_lora=apply_lora) with patch_from_pretrained_auto_causal_lm(monkeypatch=monkeypatch): _ = build_model( config=config, quantization_config=None, ) def test_build_model_bnb_after_init(monkeypatch: MonkeyPatch): config = Config(bnb_quantize_after_model_init=True) 
with patch_from_pretrained_auto_causal_lm(monkeypatch=monkeypatch): _ = build_model( config=config, quantization_config=None, ) def test_build_trainer( config: Config, training_arguments: TrainingArguments, llama_lora_model: PeftModel, soda_dataset: SodaDataset, llama_lm_collator: LMCollator, ):
# Copyright 2023 Boris Zubarev. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def test_build_training_arguments(config: Config): arguments = build_training_arguments(config=config) assert arguments.per_device_train_batch_size == config.per_device_train_batch_size assert arguments.deepspeed is None def test_build_dataset_train(path_to_train_dummy_data: str): datasets_registry.add(key="dummy", value=DummyDataset) config = Config(dataset_key="dummy", train_local_path_to_data=path_to_train_dummy_data) dataset = build_dataset(config=config, is_train=True) assert dataset[0] is not None def test_build_dataset_eval(path_to_train_dummy_data: str): datasets_registry.add(key="dummy1", value=DummyDataset) config = Config(dataset_key="dummy1", eval_local_path_to_data=path_to_train_dummy_data) dataset = build_dataset(config=config, is_train=False) assert dataset[0] is not None def test_build_dataset_eval_none(path_to_train_dummy_data: str): datasets_registry.add(key="dummy2", value=DummyDataset) config = Config( dataset_key="dummy2", train_local_path_to_data=path_to_train_dummy_data, eval_local_path_to_data=None, ) dataset = build_dataset(config=config, is_train=False) assert dataset is None def test_build_dataset_exception(path_to_train_dummy_data: str): datasets_registry.add(key="exc", value=Config) config = Config(dataset_key="exc", train_local_path_to_data=path_to_train_dummy_data) with pytest.raises(ValueError): build_dataset(config=config, is_train=True) def test_build_tokenizer(): config = Config(tokenizer_name_or_path=LLAMA_TOKENIZER_DIR) tokenizer = build_tokenizer(config=config) tokenizer("hello") def test_build_tokenizer_use_fast(): config = Config(tokenizer_name_or_path=LLAMA_TOKENIZER_DIR) tokenizer = build_tokenizer(config=config, use_fast=False) tokenizer("hello") def test_build_tokenizer_padding_size(): config = Config(tokenizer_name_or_path=LLAMA_TOKENIZER_DIR, tokenizer_padding_side="right") tokenizer = build_tokenizer(config=config) tokenizer("hello") def test_build_collator(config: Config, llama_tokenizer: PreTrainedTokenizer): collator = build_collator(config=config, tokenizer=llama_tokenizer) batch = collator(DATA) for value in batch.values(): assert isinstance(value, Tensor) def test_build_collator_exception(llama_tokenizer: PreTrainedTokenizer): collators_registry.add(key="exc", value=Config) config = Config(collator_key="exc") with pytest.raises(ValueError): _ = build_collator(config=config, tokenizer=llama_tokenizer) def test_build_quantization_config_bnb(): config = Config(load_in_8bit=True) quantization_config = build_quantization_config(config=config) assert isinstance(quantization_config, BitsAndBytesConfig) assert quantization_config.load_in_8bit def test_build_quantization_config_gptq(): config = Config(gptq_bits=4, gptq_group_size=128, from_gptq=True) quantization_config = build_quantization_config(config=config) assert isinstance(quantization_config, GPTQConfig) assert quantization_config.bits == 4 assert quantization_config.group_size == 128 def 
test_build_quantization_config_none(): config = Config(from_gptq=False, load_in_4bit=False, load_in_8bit=False) quantization_config = build_quantization_config(config=config) assert quantization_config is None @pytest.mark.parametrize("apply_lora", [False, True]) def test_build_model(monkeypatch: MonkeyPatch, apply_lora: bool): config = Config(apply_lora=apply_lora) with patch_from_pretrained_auto_causal_lm(monkeypatch=monkeypatch): _ = build_model( config=config, quantization_config=None, ) def test_build_model_bnb_after_init(monkeypatch: MonkeyPatch): config = Config(bnb_quantize_after_model_init=True) with patch_from_pretrained_auto_causal_lm(monkeypatch=monkeypatch): _ = build_model( config=config, quantization_config=None, ) def test_build_trainer( config: Config, training_arguments: TrainingArguments, llama_lora_model: PeftModel, soda_dataset: SodaDataset, llama_lm_collator: LMCollator, ):
trainer = build_trainer(
8
2023-11-10 17:55:03+00:00
24k
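These tests revolve around string-keyed registries (`datasets_registry`, `collators_registry`, `trainers_registry`) that resolve classes from `Config` keys and raise `ValueError` on bad entries. A minimal sketch of that pattern, as an illustrative re-implementation rather than xllm's actual registry class:

```python
from typing import Any, Dict, Type


class Registry:
    """Maps string keys to classes so components can be chosen from config."""

    def __init__(self) -> None:
        self._items: Dict[str, Type[Any]] = {}

    def add(self, key: str, value: Type[Any]) -> None:
        self._items[key] = value

    def get(self, key: str) -> Type[Any]:
        if key not in self._items:
            raise ValueError(f"Unknown registry key: {key}")
        return self._items[key]


datasets_registry = Registry()


class DummyDataset:
    def __getitem__(self, index: int) -> dict:
        return {"text": "hello"}


datasets_registry.add(key="dummy", value=DummyDataset)
dataset = datasets_registry.get("dummy")()
assert dataset[0] is not None  # mirrors test_build_dataset_train
```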
AMAAI-Lab/mustango
diffusers/src/diffusers/models/unet_2d_condition_flax.py
[ { "identifier": "ConfigMixin", "path": "diffusers/src/diffusers/configuration_utils.py", "snippet": "class ConfigMixin:\n r\"\"\"\n Base class for all configuration classes. Stores all configuration parameters under `self.config` Also handles all\n methods for loading/downloading/saving classes...
from typing import Tuple, Union from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_2d_blocks_flax import ( FlaxCrossAttnDownBlock2D, FlaxCrossAttnUpBlock2D, FlaxDownBlock2D, FlaxUNetMidBlock2DCrossAttn, FlaxUpBlock2D, ) import flax import flax.linen as nn import jax import jax.numpy as jnp
17,317
# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. @flax.struct.dataclass class FlaxUNet2DConditionOutput(BaseOutput): """ Args: sample (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)`): Hidden states conditioned on `encoder_hidden_states` input. Output of last layer of model. """ sample: jnp.ndarray @flax_register_to_config class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin): r""" FlaxUNet2DConditionModel is a conditional 2D UNet model that takes in a noisy sample, conditional state, and a timestep and returns sample shaped output. This model inherits from [`FlaxModelMixin`]. Check the superclass documentation for the generic methods the library implements for all the models (such as downloading or saving, etc.) Also, this model is a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module) subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior. Finally, this model supports inherent JAX features such as: - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) Parameters: sample_size (`int`, *optional*): The size of the input sample. in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample. out_channels (`int`, *optional*, defaults to 4): The number of channels in the output. down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`): The tuple of downsample blocks to use. The corresponding class names will be: "FlaxCrossAttnDownBlock2D", "FlaxCrossAttnDownBlock2D", "FlaxCrossAttnDownBlock2D", "FlaxDownBlock2D" up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D",)`): The tuple of upsample blocks to use. The corresponding class names will be: "FlaxUpBlock2D", "FlaxCrossAttnUpBlock2D", "FlaxCrossAttnUpBlock2D", "FlaxCrossAttnUpBlock2D" block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): The tuple of output channels for each block. layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block. attention_head_dim (`int` or `Tuple[int]`, *optional*, defaults to 8): The dimension of the attention heads. cross_attention_dim (`int`, *optional*, defaults to 768): The dimension of the cross attention features. dropout (`float`, *optional*, defaults to 0): Dropout probability for down, up and bottleneck blocks. flip_sin_to_cos (`bool`, *optional*, defaults to `True`): Whether to flip the sin to cos in the time embedding. freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding. 
""" sample_size: int = 32 in_channels: int = 4 out_channels: int = 4 down_block_types: Tuple[str] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D") only_cross_attention: Union[bool, Tuple[bool]] = False block_out_channels: Tuple[int] = (320, 640, 1280, 1280) layers_per_block: int = 2 attention_head_dim: Union[int, Tuple[int]] = 8 cross_attention_dim: int = 1280 dropout: float = 0.0 use_linear_projection: bool = False dtype: jnp.dtype = jnp.float32 flip_sin_to_cos: bool = True freq_shift: int = 0 def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict: # init input tensors sample_shape = (1, self.in_channels, self.sample_size, self.sample_size) sample = jnp.zeros(sample_shape, dtype=jnp.float32) timesteps = jnp.ones((1,), dtype=jnp.int32) encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"] def setup(self): block_out_channels = self.block_out_channels time_embed_dim = block_out_channels[0] * 4 # input self.conv_in = nn.Conv( block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, ) # time self.time_proj = FlaxTimesteps( block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift )
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. @flax.struct.dataclass class FlaxUNet2DConditionOutput(BaseOutput): """ Args: sample (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)`): Hidden states conditioned on `encoder_hidden_states` input. Output of last layer of model. """ sample: jnp.ndarray @flax_register_to_config class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin): r""" FlaxUNet2DConditionModel is a conditional 2D UNet model that takes in a noisy sample, conditional state, and a timestep and returns sample shaped output. This model inherits from [`FlaxModelMixin`]. Check the superclass documentation for the generic methods the library implements for all the models (such as downloading or saving, etc.) Also, this model is a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module) subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior. Finally, this model supports inherent JAX features such as: - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) Parameters: sample_size (`int`, *optional*): The size of the input sample. in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample. out_channels (`int`, *optional*, defaults to 4): The number of channels in the output. down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`): The tuple of downsample blocks to use. The corresponding class names will be: "FlaxCrossAttnDownBlock2D", "FlaxCrossAttnDownBlock2D", "FlaxCrossAttnDownBlock2D", "FlaxDownBlock2D" up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D",)`): The tuple of upsample blocks to use. The corresponding class names will be: "FlaxUpBlock2D", "FlaxCrossAttnUpBlock2D", "FlaxCrossAttnUpBlock2D", "FlaxCrossAttnUpBlock2D" block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): The tuple of output channels for each block. layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block. attention_head_dim (`int` or `Tuple[int]`, *optional*, defaults to 8): The dimension of the attention heads. cross_attention_dim (`int`, *optional*, defaults to 768): The dimension of the cross attention features. dropout (`float`, *optional*, defaults to 0): Dropout probability for down, up and bottleneck blocks. flip_sin_to_cos (`bool`, *optional*, defaults to `True`): Whether to flip the sin to cos in the time embedding. 
freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding. """ sample_size: int = 32 in_channels: int = 4 out_channels: int = 4 down_block_types: Tuple[str] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D") only_cross_attention: Union[bool, Tuple[bool]] = False block_out_channels: Tuple[int] = (320, 640, 1280, 1280) layers_per_block: int = 2 attention_head_dim: Union[int, Tuple[int]] = 8 cross_attention_dim: int = 1280 dropout: float = 0.0 use_linear_projection: bool = False dtype: jnp.dtype = jnp.float32 flip_sin_to_cos: bool = True freq_shift: int = 0 def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict: # init input tensors sample_shape = (1, self.in_channels, self.sample_size, self.sample_size) sample = jnp.zeros(sample_shape, dtype=jnp.float32) timesteps = jnp.ones((1,), dtype=jnp.int32) encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"] def setup(self): block_out_channels = self.block_out_channels time_embed_dim = block_out_channels[0] * 4 # input self.conv_in = nn.Conv( block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, ) # time self.time_proj = FlaxTimesteps( block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift )
self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)
3
2023-11-14 23:29:31+00:00
24k
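The record above completes `FlaxUNet2DConditionModel.setup` with the time-embedding MLP, and its `init_weights` shows the usual Flax lazy-initialization recipe: build zeroed dummy inputs, split the PRNG key into named streams, and call `self.init` to materialize the parameter pytree. Below is a minimal, self-contained sketch of that recipe; `TinyConditionalNet` is a made-up stand-in, not a diffusers class.

```py
# Sketch of the Flax lazy-init pattern used by init_weights above.
# TinyConditionalNet is hypothetical; only the init recipe mirrors the record.
import jax
import jax.numpy as jnp
import flax.linen as nn

class TinyConditionalNet(nn.Module):
    features: int = 8

    @nn.compact
    def __call__(self, sample, cond):
        # two dense layers stand in for the UNet's conv/attention stacks
        return nn.Dense(self.features)(sample) + nn.Dense(self.features)(cond)

model = TinyConditionalNet()
sample = jnp.zeros((1, 4))   # analogous to the zeroed `sample` tensor
cond = jnp.zeros((1, 4))     # analogous to `encoder_hidden_states`
params_rng, dropout_rng = jax.random.split(jax.random.PRNGKey(0))
params = model.init({"params": params_rng, "dropout": dropout_rng}, sample, cond)["params"]
print(jax.tree_util.tree_map(jnp.shape, params))  # inspect parameter shapes
```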
BraveGroup/Drive-WM
src/diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py
[ { "identifier": "logging", "path": "src/diffusers/utils/logging.py", "snippet": "def _get_default_logging_level() -> int:\ndef _get_library_name() -> str:\ndef _get_library_root_logger() -> logging.Logger:\ndef _configure_library_root_logger() -> None:\ndef _reset_library_root_logger() -> None:\ndef get...
from typing import List, Optional, Tuple, Union from ...utils import logging from ...utils.torch_utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline import torch
20,873
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. logger = logging.get_logger(__name__) # pylint: disable=invalid-name class DanceDiffusionPipeline(DiffusionPipeline): r""" Pipeline for audio generation. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Parameters: unet ([`UNet1DModel`]): A `UNet1DModel` to denoise the encoded audio. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded audio latents. Can be one of [`IPNDMScheduler`]. """ model_cpu_offload_seq = "unet" def __init__(self, unet, scheduler): super().__init__() self.register_modules(unet=unet, scheduler=scheduler) @torch.no_grad() def __call__( self, batch_size: int = 1, num_inference_steps: int = 100, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, audio_length_in_s: Optional[float] = None, return_dict: bool = True, ) -> Union[AudioPipelineOutput, Tuple]: r""" The call function to the pipeline for generation. Args: batch_size (`int`, *optional*, defaults to 1): The number of audio samples to generate. num_inference_steps (`int`, *optional*, defaults to 100): The number of denoising steps. More denoising steps usually lead to a higher-quality audio sample at the expense of slower inference. generator (`torch.Generator`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. audio_length_in_s (`float`, *optional*, defaults to `self.unet.config.sample_size/self.unet.config.sample_rate`): The length of the generated audio sample in seconds. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.AudioPipelineOutput`] instead of a plain tuple. Example: ```py from diffusers import DiffusionPipeline from scipy.io.wavfile import write model_id = "harmonai/maestro-150k" pipe = DiffusionPipeline.from_pretrained(model_id) pipe = pipe.to("cuda") audios = pipe(audio_length_in_s=4.0).audios # To save locally for i, audio in enumerate(audios): write(f"maestro_test_{i}.wav", pipe.unet.sample_rate, audio.transpose()) # To display in Google Colab import IPython.display as ipd for audio in audios: display(ipd.Audio(audio, rate=pipe.unet.sample_rate)) ``` Returns: [`~pipelines.AudioPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.AudioPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated audio. """ if audio_length_in_s is None: audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate sample_size = audio_length_in_s * self.unet.config.sample_rate down_scale_factor = 2 ** len(self.unet.up_blocks) if sample_size < 3 * down_scale_factor: raise ValueError( f"{audio_length_in_s} is too small. Make sure it's bigger or equal to" f" {3 * down_scale_factor / self.unet.config.sample_rate}." ) original_sample_size = int(sample_size) if sample_size % down_scale_factor != 0: sample_size = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled" f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising" " process." ) sample_size = int(sample_size) dtype = next(self.unet.parameters()).dtype shape = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. logger = logging.get_logger(__name__) # pylint: disable=invalid-name class DanceDiffusionPipeline(DiffusionPipeline): r""" Pipeline for audio generation. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Parameters: unet ([`UNet1DModel`]): A `UNet1DModel` to denoise the encoded audio. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded audio latents. Can be one of [`IPNDMScheduler`]. """ model_cpu_offload_seq = "unet" def __init__(self, unet, scheduler): super().__init__() self.register_modules(unet=unet, scheduler=scheduler) @torch.no_grad() def __call__( self, batch_size: int = 1, num_inference_steps: int = 100, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, audio_length_in_s: Optional[float] = None, return_dict: bool = True, ) -> Union[AudioPipelineOutput, Tuple]: r""" The call function to the pipeline for generation. Args: batch_size (`int`, *optional*, defaults to 1): The number of audio samples to generate. num_inference_steps (`int`, *optional*, defaults to 100): The number of denoising steps. More denoising steps usually lead to a higher-quality audio sample at the expense of slower inference. generator (`torch.Generator`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. audio_length_in_s (`float`, *optional*, defaults to `self.unet.config.sample_size/self.unet.config.sample_rate`): The length of the generated audio sample in seconds. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.AudioPipelineOutput`] instead of a plain tuple. Example: ```py from diffusers import DiffusionPipeline from scipy.io.wavfile import write model_id = "harmonai/maestro-150k" pipe = DiffusionPipeline.from_pretrained(model_id) pipe = pipe.to("cuda") audios = pipe(audio_length_in_s=4.0).audios # To save locally for i, audio in enumerate(audios): write(f"maestro_test_{i}.wav", pipe.unet.sample_rate, audio.transpose()) # To display in Google Colab import IPython.display as ipd for audio in audios: display(ipd.Audio(audio, rate=pipe.unet.sample_rate)) ``` Returns: [`~pipelines.AudioPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.AudioPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated audio. """ if audio_length_in_s is None: audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate sample_size = audio_length_in_s * self.unet.config.sample_rate down_scale_factor = 2 ** len(self.unet.up_blocks) if sample_size < 3 * down_scale_factor: raise ValueError( f"{audio_length_in_s} is too small. Make sure it's bigger or equal to" f" {3 * down_scale_factor / self.unet.config.sample_rate}." ) original_sample_size = int(sample_size) if sample_size % down_scale_factor != 0: sample_size = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled" f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising" " process." ) sample_size = int(sample_size) dtype = next(self.unet.parameters()).dtype shape = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
audio = randn_tensor(shape, generator=generator, device=self._execution_device, dtype=dtype)
1
2023-11-18 01:40:55+00:00
24k
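Before denoising, the pipeline in the record above pads the requested audio length up to a multiple of the UNet's total downsampling factor (2 to the power of the number of up blocks), then trims back to the original length afterwards. A small sketch of that arithmetic follows; `sample_rate` and `num_up_blocks` are made-up stand-ins for the real `unet.config` values.

```py
# Sketch of the sample-size rounding in DanceDiffusionPipeline.__call__.
# sample_rate and num_up_blocks are hypothetical, not read from a real model.
sample_rate = 16_000
num_up_blocks = 4
audio_length_in_s = 3.9997

down_scale_factor = 2 ** num_up_blocks          # 16
sample_size = audio_length_in_s * sample_rate   # 63995.2 samples requested
original_sample_size = int(sample_size)
if sample_size % down_scale_factor != 0:
    # pad up to the next multiple so every down/up block halves/doubles cleanly
    sample_size = (sample_size // down_scale_factor + 1) * down_scale_factor
print(int(sample_size), original_sample_size)   # 64000 63995
```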
wjun0830/CGDETR
cg_detr/train.py
[ { "identifier": "BaseOptions", "path": "cg_detr/config.py", "snippet": "class BaseOptions(object):\n saved_option_filename = \"opt.json\"\n ckpt_filename = \"model.ckpt\"\n tensorboard_log_dir = \"tensorboard_log\"\n train_log_filename = \"train.log.txt\"\n eval_log_filename = \"eval.log....
import os import time import json import pprint import random import numpy as np import torch import torch.nn as nn import torch.backends.cudnn as cudnn import logging import sys from tqdm import tqdm, trange from collections import defaultdict from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter from cg_detr.config import BaseOptions from cg_detr.start_end_dataset import \ StartEndDataset, start_end_collate, prepare_batch_inputs from cg_detr.inference import eval_epoch, start_inference, setup_model from utils.basic_utils import AverageMeter, dict_to_markdown from utils.model_utils import count_parameters
14,870
es_cnt = 0 prev_best_score = stop_score checkpoint = { "model": model.state_dict(), "optimizer": optimizer.state_dict(), "lr_scheduler": lr_scheduler.state_dict(), "epoch": epoch_i, "opt": opt } torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", "_best.ckpt")) best_file_paths = [e.replace("latest", "best") for e in latest_file_paths] for src, tgt in zip(latest_file_paths, best_file_paths): os.renames(src, tgt) logger.info("The checkpoint file has been updated.") else: es_cnt += 1 if opt.max_es_cnt != -1 and es_cnt > opt.max_es_cnt: # early stop with open(opt.train_log_filepath, "a") as f: f.write(f"Early Stop at epoch {epoch_i}") logger.info(f"\n>>>>> Early stop at epoch {epoch_i} {prev_best_score}\n") break # save ckpt checkpoint = { "model": model.state_dict(), "optimizer": optimizer.state_dict(), "lr_scheduler": lr_scheduler.state_dict(), "epoch": epoch_i, "opt": opt } torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", "_latest.ckpt")) save_interval = 10 if "subs_train" in opt.train_path else 50 # smaller for pretrain if (epoch_i + 1) % save_interval == 0 or (epoch_i + 1) % opt.lr_drop == 0: # additional copies checkpoint = { "model": model.state_dict(), "optimizer": optimizer.state_dict(), "epoch": epoch_i, "opt": opt } torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", f"_e{epoch_i:04d}.ckpt")) if opt.debug: break tb_writer.close() def start_training(): logger.info("Setup config, data and model...") opt = BaseOptions().parse() set_seed(opt.seed) if opt.debug: # keep the model run deterministically # 'cudnn.benchmark = True' enabled auto finding the best algorithm for a specific input/net config. # Enable this only when input size is fixed. cudnn.benchmark = False cudnn.deterministic = True dataset_config = dict( dset_name=opt.dset_name, data_path=opt.train_path, v_feat_dirs=opt.v_feat_dirs, q_feat_dir=opt.t_feat_dir, q_feat_type="last_hidden_state", max_q_l=opt.max_q_l, max_v_l=opt.max_v_l, ctx_mode=opt.ctx_mode, data_ratio=opt.data_ratio, normalize_v=not opt.no_norm_vfeat, normalize_t=not opt.no_norm_tfeat, clip_len=opt.clip_length, max_windows=opt.max_windows, span_loss_type=opt.span_loss_type, txt_drop_ratio=opt.txt_drop_ratio, dset_domain=opt.dset_domain, ) dataset_config["data_path"] = opt.train_path train_dataset = StartEndDataset(**dataset_config) if opt.eval_path is not None: dataset_config["data_path"] = opt.eval_path dataset_config["txt_drop_ratio"] = 0 dataset_config["q_feat_dir"] = opt.t_feat_dir.replace("sub_features", "text_features") # for pretraining # dataset_config["load_labels"] = False # uncomment to calculate eval loss eval_dataset = StartEndDataset(**dataset_config) else: eval_dataset = None model, criterion, optimizer, lr_scheduler = setup_model(opt) logger.info(f"Model {model}") count_parameters(model) logger.info("Start Training...") # For tvsum dataset, use train_hl function if opt.dset_name in ['tvsum', 'youtube_uni']: train_hl(model, criterion, optimizer, lr_scheduler, train_dataset, eval_dataset, opt) else: train(model, criterion, optimizer, lr_scheduler, train_dataset, eval_dataset, opt) return opt.ckpt_filepath.replace(".ckpt", "_best.ckpt"), opt.eval_split_name, opt.eval_path, opt.debug, opt if __name__ == '__main__': best_ckpt_path, eval_split_name, eval_path, debug, opt = start_training() if not debug: input_args = ["--resume", best_ckpt_path, "--eval_split_name", eval_split_name, "--eval_path", eval_path] sys.argv[1:] = input_args logger.info("\n\n\nFINISHED TRAINING!!!") logger.info("Evaluating model at 
{}".format(best_ckpt_path)) logger.info("Input args {}".format(sys.argv[1:]))
logger = logging.getLogger(__name__) logging.basicConfig(format="%(asctime)s.%(msecs)03d:%(levelname)s:%(name)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO) def set_seed(seed, use_cuda=True): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) if use_cuda: torch.cuda.manual_seed_all(seed) def train_epoch(model, criterion, train_loader, optimizer, opt, epoch_i, tb_writer): logger.info(f"[Epoch {epoch_i+1}]") model.train() criterion.train() # init meters time_meters = defaultdict(AverageMeter) loss_meters = defaultdict(AverageMeter) num_training_examples = len(train_loader) timer_dataloading = time.time() for batch_idx, batch in tqdm(enumerate(train_loader), desc="Training Iteration", total=num_training_examples): time_meters["dataloading_time"].update(time.time() - timer_dataloading) timer_start = time.time() model_inputs, targets = prepare_batch_inputs(batch[1], opt.device, non_blocking=opt.pin_memory) time_meters["prepare_inputs_time"].update(time.time() - timer_start) timer_start = time.time() outputs = model(**model_inputs, targets=targets) loss_dict = criterion(outputs, targets) weight_dict = criterion.weight_dict losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict) time_meters["model_forward_time"].update(time.time() - timer_start) timer_start = time.time() optimizer.zero_grad() losses.backward() if opt.grad_clip > 0: nn.utils.clip_grad_norm_(model.parameters(), opt.grad_clip) optimizer.step() time_meters["model_backward_time"].update(time.time() - timer_start) loss_dict["loss_overall"] = float(losses) # for logging only for k, v in loss_dict.items(): loss_meters[k].update(float(v) * weight_dict[k] if k in weight_dict else float(v)) timer_dataloading = time.time() if opt.debug and batch_idx == 3: break # print/add logs tb_writer.add_scalar("Train/lr", float(optimizer.param_groups[0]["lr"]), epoch_i+1) for k, v in loss_meters.items(): tb_writer.add_scalar("Train/{}".format(k), v.avg, epoch_i+1) to_write = opt.train_log_txt_formatter.format( time_str=time.strftime("%Y_%m_%d_%H_%M_%S"), epoch=epoch_i+1, loss_str=" ".join(["{} {:.4f}".format(k, v.avg) for k, v in loss_meters.items()])) with open(opt.train_log_filepath, "a") as f: f.write(to_write) logger.info("Epoch time stats:") for name, meter in time_meters.items(): d = {k: f"{getattr(meter, k):.4f}" for k in ["max", "min", "avg"]} logger.info(f"{name} ==> {d}") def train(model, criterion, optimizer, lr_scheduler, train_dataset, val_dataset, opt): if opt.device.type == "cuda": logger.info("CUDA enabled.") model.to(opt.device) tb_writer = SummaryWriter(opt.tensorboard_log_dir) tb_writer.add_text("hyperparameters", dict_to_markdown(vars(opt), max_str_len=None)) opt.train_log_txt_formatter = "{time_str} [Epoch] {epoch:03d} [Loss] {loss_str}\n" opt.eval_log_txt_formatter = "{time_str} [Epoch] {epoch:03d} [Loss] {loss_str} [Metrics] {eval_metrics_str}\n" train_loader = DataLoader( train_dataset, collate_fn=start_end_collate, batch_size=opt.bsz, num_workers=opt.num_workers, shuffle=True, pin_memory=opt.pin_memory ) prev_best_score = 0. 
es_cnt = 0 # start_epoch = 0 if opt.start_epoch is None: start_epoch = -1 if opt.eval_untrained else 0 else: start_epoch = opt.start_epoch save_submission_filename = "latest_{}_{}_preds.jsonl".format(opt.dset_name, opt.eval_split_name) for epoch_i in trange(start_epoch, opt.n_epoch, desc="Epoch"): if epoch_i > -1: train_epoch(model, criterion, train_loader, optimizer, opt, epoch_i, tb_writer) lr_scheduler.step() eval_epoch_interval = opt.eval_epoch if opt.eval_path is not None and (epoch_i + 1) % eval_epoch_interval == 0: with torch.no_grad(): metrics_no_nms, metrics_nms, eval_loss_meters, latest_file_paths = \ eval_epoch(model, val_dataset, opt, save_submission_filename, epoch_i, criterion, tb_writer) # log to_write = opt.eval_log_txt_formatter.format( time_str=time.strftime("%Y_%m_%d_%H_%M_%S"), epoch=epoch_i, loss_str=" ".join(["{} {:.4f}".format(k, v.avg) for k, v in eval_loss_meters.items()]), eval_metrics_str=json.dumps(metrics_no_nms)) with open(opt.eval_log_filepath, "a") as f: f.write(to_write) logger.info("metrics_no_nms {}".format(pprint.pformat(metrics_no_nms["brief"], indent=4))) if metrics_nms is not None: logger.info("metrics_nms {}".format(pprint.pformat(metrics_nms["brief"], indent=4))) metrics = metrics_no_nms for k, v in metrics["brief"].items(): tb_writer.add_scalar(f"Eval/{k}", float(v), epoch_i+1) if opt.dset_name in ['hl']: stop_score = metrics["brief"]["MR-full-mAP"] else: stop_score = (metrics["brief"]["MR-full-R1@0.7"] + metrics["brief"]["MR-full-R1@0.5"]) / 2 if stop_score > prev_best_score: es_cnt = 0 prev_best_score = stop_score checkpoint = { "model": model.state_dict(), "optimizer": optimizer.state_dict(), "lr_scheduler": lr_scheduler.state_dict(), "epoch": epoch_i, "opt": opt } torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", "_best.ckpt")) best_file_paths = [e.replace("latest", "best") for e in latest_file_paths] for src, tgt in zip(latest_file_paths, best_file_paths): os.renames(src, tgt) logger.info("The checkpoint file has been updated.") else: es_cnt += 1 if opt.max_es_cnt != -1 and es_cnt > opt.max_es_cnt: # early stop with open(opt.train_log_filepath, "a") as f: f.write(f"Early Stop at epoch {epoch_i}") logger.info(f"\n>>>>> Early stop at epoch {epoch_i} {prev_best_score}\n") break # save ckpt checkpoint = { "model": model.state_dict(), "optimizer": optimizer.state_dict(), "lr_scheduler": lr_scheduler.state_dict(), "epoch": epoch_i, "opt": opt } torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", "_latest.ckpt")) # save_interval = 10 if "subs_train" in opt.train_path else 50 # smaller for pretrain # if (epoch_i + 1) % save_interval == 0 or (epoch_i + 1) % opt.lr_drop == 0: # additional copies # checkpoint = { # "model": model.state_dict(), # "optimizer": optimizer.state_dict(), # "epoch": epoch_i, # "opt": opt # } # torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", f"_e{epoch_i:04d}.ckpt")) if opt.debug: break tb_writer.close() def train_hl(model, criterion, optimizer, lr_scheduler, train_dataset, val_dataset, opt): if opt.device.type == "cuda": logger.info("CUDA enabled.") model.to(opt.device) tb_writer = SummaryWriter(opt.tensorboard_log_dir) tb_writer.add_text("hyperparameters", dict_to_markdown(vars(opt), max_str_len=None)) opt.train_log_txt_formatter = "{time_str} [Epoch] {epoch:03d} [Loss] {loss_str}\n" opt.eval_log_txt_formatter = "{time_str} [Epoch] {epoch:03d} [Loss] {loss_str} [Metrics] {eval_metrics_str}\n" train_loader = DataLoader( train_dataset, collate_fn=start_end_collate, batch_size=opt.bsz, 
num_workers=opt.num_workers, shuffle=True, pin_memory=opt.pin_memory ) prev_best_score = 0. es_cnt = 0 # start_epoch = 0 if opt.start_epoch is None: start_epoch = -1 if opt.eval_untrained else 0 else: start_epoch = opt.start_epoch save_submission_filename = "latest_{}_{}_preds.jsonl".format(opt.dset_name, opt.eval_split_name) for epoch_i in trange(start_epoch, opt.n_epoch, desc="Epoch"): if epoch_i > -1: train_epoch(model, criterion, train_loader, optimizer, opt, epoch_i, tb_writer) lr_scheduler.step() eval_epoch_interval = 5 if opt.eval_path is not None and (epoch_i + 1) % eval_epoch_interval == 0: with torch.no_grad(): metrics_no_nms, metrics_nms, eval_loss_meters, latest_file_paths = \ eval_epoch(model, val_dataset, opt, save_submission_filename, epoch_i, criterion, tb_writer) # log to_write = opt.eval_log_txt_formatter.format( time_str=time.strftime("%Y_%m_%d_%H_%M_%S"), epoch=epoch_i, loss_str=" ".join(["{} {:.4f}".format(k, v.avg) for k, v in eval_loss_meters.items()]), eval_metrics_str=json.dumps(metrics_no_nms)) with open(opt.eval_log_filepath, "a") as f: f.write(to_write) logger.info("metrics_no_nms {}".format(pprint.pformat(metrics_no_nms["brief"], indent=4))) if metrics_nms is not None: logger.info("metrics_nms {}".format(pprint.pformat(metrics_nms["brief"], indent=4))) metrics = metrics_no_nms for k, v in metrics["brief"].items(): tb_writer.add_scalar(f"Eval/{k}", float(v), epoch_i+1) # stop_score = metrics["brief"]["MR-full-mAP"] stop_score = metrics["brief"]["mAP"] if stop_score > prev_best_score: es_cnt = 0 prev_best_score = stop_score checkpoint = { "model": model.state_dict(), "optimizer": optimizer.state_dict(), "lr_scheduler": lr_scheduler.state_dict(), "epoch": epoch_i, "opt": opt } torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", "_best.ckpt")) best_file_paths = [e.replace("latest", "best") for e in latest_file_paths] for src, tgt in zip(latest_file_paths, best_file_paths): os.renames(src, tgt) logger.info("The checkpoint file has been updated.") else: es_cnt += 1 if opt.max_es_cnt != -1 and es_cnt > opt.max_es_cnt: # early stop with open(opt.train_log_filepath, "a") as f: f.write(f"Early Stop at epoch {epoch_i}") logger.info(f"\n>>>>> Early stop at epoch {epoch_i} {prev_best_score}\n") break # save ckpt checkpoint = { "model": model.state_dict(), "optimizer": optimizer.state_dict(), "lr_scheduler": lr_scheduler.state_dict(), "epoch": epoch_i, "opt": opt } torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", "_latest.ckpt")) save_interval = 10 if "subs_train" in opt.train_path else 50 # smaller for pretrain if (epoch_i + 1) % save_interval == 0 or (epoch_i + 1) % opt.lr_drop == 0: # additional copies checkpoint = { "model": model.state_dict(), "optimizer": optimizer.state_dict(), "epoch": epoch_i, "opt": opt } torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", f"_e{epoch_i:04d}.ckpt")) if opt.debug: break tb_writer.close() def start_training(): logger.info("Setup config, data and model...") opt = BaseOptions().parse() set_seed(opt.seed) if opt.debug: # keep the model run deterministically # 'cudnn.benchmark = True' enabled auto finding the best algorithm for a specific input/net config. # Enable this only when input size is fixed. 
cudnn.benchmark = False cudnn.deterministic = True dataset_config = dict( dset_name=opt.dset_name, data_path=opt.train_path, v_feat_dirs=opt.v_feat_dirs, q_feat_dir=opt.t_feat_dir, q_feat_type="last_hidden_state", max_q_l=opt.max_q_l, max_v_l=opt.max_v_l, ctx_mode=opt.ctx_mode, data_ratio=opt.data_ratio, normalize_v=not opt.no_norm_vfeat, normalize_t=not opt.no_norm_tfeat, clip_len=opt.clip_length, max_windows=opt.max_windows, span_loss_type=opt.span_loss_type, txt_drop_ratio=opt.txt_drop_ratio, dset_domain=opt.dset_domain, ) dataset_config["data_path"] = opt.train_path train_dataset = StartEndDataset(**dataset_config) if opt.eval_path is not None: dataset_config["data_path"] = opt.eval_path dataset_config["txt_drop_ratio"] = 0 dataset_config["q_feat_dir"] = opt.t_feat_dir.replace("sub_features", "text_features") # for pretraining # dataset_config["load_labels"] = False # uncomment to calculate eval loss eval_dataset = StartEndDataset(**dataset_config) else: eval_dataset = None model, criterion, optimizer, lr_scheduler = setup_model(opt) logger.info(f"Model {model}") count_parameters(model) logger.info("Start Training...") # For tvsum dataset, use train_hl function if opt.dset_name in ['tvsum', 'youtube_uni']: train_hl(model, criterion, optimizer, lr_scheduler, train_dataset, eval_dataset, opt) else: train(model, criterion, optimizer, lr_scheduler, train_dataset, eval_dataset, opt) return opt.ckpt_filepath.replace(".ckpt", "_best.ckpt"), opt.eval_split_name, opt.eval_path, opt.debug, opt if __name__ == '__main__': best_ckpt_path, eval_split_name, eval_path, debug, opt = start_training() if not debug: input_args = ["--resume", best_ckpt_path, "--eval_split_name", eval_split_name, "--eval_path", eval_path] sys.argv[1:] = input_args logger.info("\n\n\nFINISHED TRAINING!!!") logger.info("Evaluating model at {}".format(best_ckpt_path)) logger.info("Input args {}".format(sys.argv[1:]))
start_inference(opt)
5
2023-11-10 12:45:25+00:00
24k
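The training loop in the record above implements patience-based early stopping: `es_cnt` resets to zero whenever `stop_score` beats `prev_best_score` (at which point the `_latest` checkpoint files are promoted to `_best`), and training aborts once the counter exceeds `opt.max_es_cnt`. A minimal sketch of that bookkeeping, with toy scores:

```py
# Sketch of the early-stopping logic in train() above; scores are made up.
class EarlyStopper:
    def __init__(self, max_es_cnt=3):
        self.prev_best_score, self.es_cnt, self.max_es_cnt = 0.0, 0, max_es_cnt

    def step(self, stop_score):
        if stop_score > self.prev_best_score:
            # new best: this is where "_latest" ckpts get renamed to "_best"
            self.prev_best_score, self.es_cnt = stop_score, 0
            return False
        self.es_cnt += 1  # no improvement at this evaluation
        return self.max_es_cnt != -1 and self.es_cnt > self.max_es_cnt

stopper = EarlyStopper(max_es_cnt=3)
for epoch_i, score in enumerate([0.41, 0.43, 0.42, 0.42, 0.42, 0.42]):
    if stopper.step(score):
        print(f"early stop at epoch {epoch_i}")  # fires at epoch 5
        break
```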
ej0cl6/TextEE
TextEE/models/OneIE/E2Etrainer.py
[ { "identifier": "BasicTrainer", "path": "TextEE/models/trainer.py", "snippet": "class BasicTrainer(object):\n def __init__(self, config, type_set=None):\n self.config = config\n self.type_set = type_set\n \n @classmethod\n def add_extra_info_fn(cls, instances, raw_data, con...
import os, sys, logging, tqdm, pprint, copy import torch import numpy as np import ipdb from transformers import (BertTokenizer, RobertaTokenizer, XLMRobertaTokenizer, AutoTokenizer, AdamW, get_linear_schedule_with_warmup) from torch.utils.data import DataLoader from torch.optim import AdamW from ..trainer import BasicTrainer from .E2Emodel import OneIEE2EModel from .data import IEDataset from .util import generate_vocabs, load_valid_patterns, save_result, best_score_by_task from .scorer import score_graphs from scorer import compute_f1, print_scores
16,322
def load_model_(self, checkpoint=None): assert self.tokenizer if checkpoint: logger.info(f"Loading model from {checkpoint}") state = torch.load(os.path.join(checkpoint, "best_model.state"), map_location=f'cuda:{self.config.gpu_device}') self.vocabs = state["vocabs"] self.type_set = state["type_set"] self.valid_patterns = state["valid_patterns"] self.model = OneIEE2EModel(self.config, self.vocabs, self.valid_patterns) self.model.load_state_dict(state['model']) self.model.cuda(device=self.config.gpu_device) else: self.valid_patterns = load_valid_patterns(self.config.valid_pattern_path, self.vocabs) self.model = OneIEE2EModel(self.config, self.vocabs, self.valid_patterns) self.model.cuda(device=self.config.gpu_device) def load_model(self, checkpoint=None): self.load_tokenizer_(checkpoint=checkpoint) self.load_model_(checkpoint=checkpoint) def train(self, train_data, dev_data, **kwargs): self.load_tokenizer_() train_set = IEDataset(train_data, self.tokenizer, max_length=self.config.max_length, gpu=True, relation_mask_self=self.config.relation_mask_self, relation_directional=self.config.relation_directional, symmetric_relations=self.config.symmetric_relations, test=False ) dev_set = IEDataset(dev_data, self.tokenizer, max_length=self.config.max_length, gpu=True, relation_mask_self=self.config.relation_mask_self, relation_directional=self.config.relation_directional, symmetric_relations=self.config.symmetric_relations, test=False ) self.vocabs = generate_vocabs([train_set, dev_set]) train_set.numberize(self.tokenizer, self.vocabs) dev_set.numberize(self.tokenizer, self.vocabs) self.load_model_() batch_num = len(train_set) // self.config.batch_size + (len(train_set) % self.config.batch_size != 0) dev_batch_num = len(dev_set) // self.config.eval_batch_size + (len(dev_set) % self.config.eval_batch_size != 0) param_groups = [ { 'params': [p for n, p in self.model.named_parameters() if n.startswith('bert')], 'lr': self.config.bert_learning_rate, 'weight_decay': self.config.bert_weight_decay }, { 'params': [p for n, p in self.model.named_parameters() if not n.startswith('bert') and 'crf' not in n and 'global_feature' not in n], 'lr': self.config.learning_rate, 'weight_decay': self.config.weight_decay }, { 'params': [p for n, p in self.model.named_parameters() if not n.startswith('bert') and ('crf' in n or 'global_feature' in n)], 'lr': self.config.learning_rate, 'weight_decay': 0 } ] optimizer = AdamW(params=param_groups) schedule = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=batch_num*self.config.warmup_epoch, num_training_steps=batch_num*self.config.max_epoch) best_scores = {self.config.target_task: {"f": 0.0}} best_epoch = -1 target_task = self.config.target_task logger.info('================Start Training================') for epoch in range(self.config.max_epoch): logger.info('Epoch: {}'.format(epoch)) # training step progress = tqdm.tqdm(total=batch_num, ncols=75, desc='Train {}'.format(epoch)) optimizer.zero_grad() cummulate_loss = 0. 
for batch_idx, batch in enumerate(DataLoader( train_set, batch_size=self.config.batch_size // self.config.accumulate_step, shuffle=True, drop_last=False, collate_fn=train_set.collate_fn)): loss = self.model(batch) loss = loss * (1 / self.config.accumulate_step) cummulate_loss += loss loss.backward() if (batch_idx + 1) % self.config.accumulate_step == 0: progress.update(1) torch.nn.utils.clip_grad_norm_( self.model.parameters(), self.config.grad_clipping) optimizer.step() schedule.step() optimizer.zero_grad() progress.close() logger.info({"average training loss": (cummulate_loss / batch_idx).data}) # dev set progress = tqdm.tqdm(total=dev_batch_num, ncols=75, desc='Dev {}'.format(epoch)) best_dev_role_model = False dev_gold_graphs, dev_pred_graphs, dev_tokens, dev_wnd_ids = [], [], [], [] for batch in DataLoader(dev_set, batch_size=self.config.eval_batch_size, shuffle=False, collate_fn=dev_set.collate_fn): progress.update(1) graphs = self.model.predict(batch, gold_tri=False) gold_graph = copy.deepcopy(batch.graphs) for graph in graphs: graph.clean(relation_directional=self.config.relation_directional, symmetric_relations=self.config.symmetric_relations) dev_gold_graphs.extend(gold_graph) dev_pred_graphs.extend(graphs) dev_tokens.extend(batch.tokens) dev_wnd_ids.extend(batch.wnd_ids) progress.close()
logger = logging.getLogger(__name__) class OneIEE2ETrainer(BasicTrainer): def __init__(self, config, type_set=None): super().__init__(config, type_set) self.tokenizer = None self.model = None self.valid_patterns = None @classmethod def add_extra_info_fn(cls, instances, raw_data, config): extra_info_map = {} for dt in raw_data: extra_info = { "entity_mentions": dt["entity_mentions"] if "entity_mentions" in dt else [], "relation_mentions": dt["relation_mentions"] if "relation_mentions" in dt else [], "event_mentions": dt["event_mentions"] if "event_mentions" in dt else [], } extra_info_map[(dt["doc_id"], dt["wnd_id"])] = extra_info for instance in instances: instance["extra_info"] = extra_info_map[(instance["doc_id"], instance["wnd_id"])] return instances def load_tokenizer_(self, checkpoint=None): if checkpoint: logger.info(f"Loading tokenizer from {checkpoint}") state = torch.load(os.path.join(checkpoint, "best_model.tokenizer")) self.tokenizer = state["tokenizer"] else: logger.info(f"Loading tokenizer from {self.config.pretrained_model_name}") if self.config.pretrained_model_name.startswith('bert-'): self.tokenizer = BertTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir) elif self.config.pretrained_model_name.startswith('roberta-'): self.tokenizer = RobertaTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir) elif self.config.pretrained_model_name.startswith('xlm-roberta-'): self.tokenizer = XLMRobertaTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir) else: self.tokenizer = AutoTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir, do_lower_case=False) def load_model_(self, checkpoint=None): assert self.tokenizer if checkpoint: logger.info(f"Loading model from {checkpoint}") state = torch.load(os.path.join(checkpoint, "best_model.state"), map_location=f'cuda:{self.config.gpu_device}') self.vocabs = state["vocabs"] self.type_set = state["type_set"] self.valid_patterns = state["valid_patterns"] self.model = OneIEE2EModel(self.config, self.vocabs, self.valid_patterns) self.model.load_state_dict(state['model']) self.model.cuda(device=self.config.gpu_device) else: self.valid_patterns = load_valid_patterns(self.config.valid_pattern_path, self.vocabs) self.model = OneIEE2EModel(self.config, self.vocabs, self.valid_patterns) self.model.cuda(device=self.config.gpu_device) def load_model(self, checkpoint=None): self.load_tokenizer_(checkpoint=checkpoint) self.load_model_(checkpoint=checkpoint) def train(self, train_data, dev_data, **kwargs): self.load_tokenizer_() train_set = IEDataset(train_data, self.tokenizer, max_length=self.config.max_length, gpu=True, relation_mask_self=self.config.relation_mask_self, relation_directional=self.config.relation_directional, symmetric_relations=self.config.symmetric_relations, test=False ) dev_set = IEDataset(dev_data, self.tokenizer, max_length=self.config.max_length, gpu=True, relation_mask_self=self.config.relation_mask_self, relation_directional=self.config.relation_directional, symmetric_relations=self.config.symmetric_relations, test=False ) self.vocabs = generate_vocabs([train_set, dev_set]) train_set.numberize(self.tokenizer, self.vocabs) dev_set.numberize(self.tokenizer, self.vocabs) self.load_model_() batch_num = len(train_set) // self.config.batch_size + (len(train_set) % self.config.batch_size != 0) dev_batch_num = len(dev_set) // self.config.eval_batch_size + (len(dev_set) % 
self.config.eval_batch_size != 0) param_groups = [ { 'params': [p for n, p in self.model.named_parameters() if n.startswith('bert')], 'lr': self.config.bert_learning_rate, 'weight_decay': self.config.bert_weight_decay }, { 'params': [p for n, p in self.model.named_parameters() if not n.startswith('bert') and 'crf' not in n and 'global_feature' not in n], 'lr': self.config.learning_rate, 'weight_decay': self.config.weight_decay }, { 'params': [p for n, p in self.model.named_parameters() if not n.startswith('bert') and ('crf' in n or 'global_feature' in n)], 'lr': self.config.learning_rate, 'weight_decay': 0 } ] optimizer = AdamW(params=param_groups) schedule = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=batch_num*self.config.warmup_epoch, num_training_steps=batch_num*self.config.max_epoch) best_scores = {self.config.target_task: {"f": 0.0}} best_epoch = -1 target_task = self.config.target_task logger.info('================Start Training================') for epoch in range(self.config.max_epoch): logger.info('Epoch: {}'.format(epoch)) # training step progress = tqdm.tqdm(total=batch_num, ncols=75, desc='Train {}'.format(epoch)) optimizer.zero_grad() cummulate_loss = 0. for batch_idx, batch in enumerate(DataLoader( train_set, batch_size=self.config.batch_size // self.config.accumulate_step, shuffle=True, drop_last=False, collate_fn=train_set.collate_fn)): loss = self.model(batch) loss = loss * (1 / self.config.accumulate_step) cummulate_loss += loss loss.backward() if (batch_idx + 1) % self.config.accumulate_step == 0: progress.update(1) torch.nn.utils.clip_grad_norm_( self.model.parameters(), self.config.grad_clipping) optimizer.step() schedule.step() optimizer.zero_grad() progress.close() logger.info({"average training loss": (cummulate_loss / batch_idx).data}) # dev set progress = tqdm.tqdm(total=dev_batch_num, ncols=75, desc='Dev {}'.format(epoch)) best_dev_role_model = False dev_gold_graphs, dev_pred_graphs, dev_tokens, dev_wnd_ids = [], [], [], [] for batch in DataLoader(dev_set, batch_size=self.config.eval_batch_size, shuffle=False, collate_fn=dev_set.collate_fn): progress.update(1) graphs = self.model.predict(batch, gold_tri=False) gold_graph = copy.deepcopy(batch.graphs) for graph in graphs: graph.clean(relation_directional=self.config.relation_directional, symmetric_relations=self.config.symmetric_relations) dev_gold_graphs.extend(gold_graph) dev_pred_graphs.extend(graphs) dev_tokens.extend(batch.tokens) dev_wnd_ids.extend(batch.wnd_ids) progress.close()
dev_scores = score_graphs(dev_gold_graphs, dev_pred_graphs, self.vocabs['event_type'])
7
2023-11-15 21:32:56+00:00
24k
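The trainer in the record above uses gradient accumulation: each mini-batch holds `batch_size // accumulate_step` examples, the loss is scaled by `1 / accumulate_step`, and the optimizer (with clipping and the LR schedule) steps only every `accumulate_step` batches, emulating the full batch size. Here is a runnable toy sketch of the same loop shape; the model and data are placeholders.

```py
# Sketch of the gradient-accumulation loop in OneIEE2ETrainer.train above.
import torch

model = torch.nn.Linear(4, 1)       # toy stand-in for OneIEE2EModel
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
accumulate_step, grad_clipping = 4, 5.0

optimizer.zero_grad()
for batch_idx in range(8):
    x = torch.randn(2, 4)                                   # toy mini-batch
    loss = model(x).pow(2).mean() * (1 / accumulate_step)   # scale as in the trainer
    loss.backward()                                         # gradients accumulate
    if (batch_idx + 1) % accumulate_step == 0:
        torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clipping)
        optimizer.step()
        optimizer.zero_grad()
```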
ahayler/s4c
models/bts/trainer_overfit.py
[ { "identifier": "make_datasets", "path": "datasets/data_util.py", "snippet": "def make_datasets(config):\n type = config.get(\"type\", \"KITTI_Raw\")\n if type == \"KITTI_Odometry\":\n train_dataset = KittiOdometryDataset(\n base_path=config[\"data_path\"],\n frame_cou...
import math import ignite.distributed as idist import torch import numpy as np from copy import copy from typing import Optional, Union, Iterable, Sequence from ignite.contrib.handlers import TensorboardLogger from ignite.engine import Engine from matplotlib import pyplot as plt from torch import optim, nn from torch.utils.data import DataLoader, Dataset, Sampler from torch.utils.data.dataloader import T_co, _collate_fn_t, _worker_init_fn_t from torchvision.utils import make_grid from datasets.data_util import make_datasets from models.common.model.scheduler import make_scheduler from models.common.render import NeRFRenderer from models.bts.model.loss import ReconstructionLoss from models.bts.trainer import get_metrics, BTSWrapper, BTSNet from scripts.inference_setup import render_profile from utils.array_operations import map_fn, unsqueezer, to from utils.base_trainer import base_training from utils.plotting import color_tensor, color_segmentation_tensor
15,653
class EncoderDummy(nn.Module): def __init__(self, size, feat_dim, num_views=1) -> None: super().__init__() self.feats = nn.Parameter(torch.randn(num_views, feat_dim, *size)) self.latent_size = feat_dim def forward(self, x): n = x.shape[0] return [self.feats.expand(n, -1, -1, -1)] class DataloaderDummy(DataLoader): def __init__(self, dataset: Dataset[T_co], batch_size: Optional[int] = 1, shuffle: Optional[bool] = None, sampler: Union[Sampler, Iterable, None] = None, batch_sampler: Union[Sampler[Sequence], Iterable[Sequence], None] = None, num_workers: int = 0, collate_fn: Optional[_collate_fn_t] = None, pin_memory: bool = False, drop_last: bool = False, timeout: float = 0, worker_init_fn: Optional[_worker_init_fn_t] = None, multiprocessing_context=None, generator=None, *, prefetch_factor: int = 2, persistent_workers: bool = False, pin_memory_device: str = ""): super().__init__(dataset, batch_size, shuffle, sampler, batch_sampler, num_workers, collate_fn, pin_memory, drop_last, timeout, worker_init_fn, multiprocessing_context, generator, prefetch_factor=prefetch_factor, persistent_workers=persistent_workers, pin_memory_device=pin_memory_device) self.element = to(map_fn(map_fn(dataset.__getitem__(0), torch.tensor), unsqueezer), "cuda:0") def _get_iterator(self): return iter([self.element]) def __iter__(self): return super().__iter__() def __len__(self) -> int: return 1
class EncoderDummy(nn.Module): def __init__(self, size, feat_dim, num_views=1) -> None: super().__init__() self.feats = nn.Parameter(torch.randn(num_views, feat_dim, *size)) self.latent_size = feat_dim def forward(self, x): n = x.shape[0] return [self.feats.expand(n, -1, -1, -1)] class DataloaderDummy(DataLoader): def __init__(self, dataset: Dataset[T_co], batch_size: Optional[int] = 1, shuffle: Optional[bool] = None, sampler: Union[Sampler, Iterable, None] = None, batch_sampler: Union[Sampler[Sequence], Iterable[Sequence], None] = None, num_workers: int = 0, collate_fn: Optional[_collate_fn_t] = None, pin_memory: bool = False, drop_last: bool = False, timeout: float = 0, worker_init_fn: Optional[_worker_init_fn_t] = None, multiprocessing_context=None, generator=None, *, prefetch_factor: int = 2, persistent_workers: bool = False, pin_memory_device: str = ""): super().__init__(dataset, batch_size, shuffle, sampler, batch_sampler, num_workers, collate_fn, pin_memory, drop_last, timeout, worker_init_fn, multiprocessing_context, generator, prefetch_factor=prefetch_factor, persistent_workers=persistent_workers, pin_memory_device=pin_memory_device) self.element = to(map_fn(map_fn(dataset.__getitem__(0), torch.tensor), unsqueezer), "cuda:0") def _get_iterator(self): return iter([self.element]) def __iter__(self): return super().__iter__() def __len__(self) -> int: return 1
class BTSWrapperOverfit(BTSWrapper):
4
2023-11-12 21:53:27+00:00
24k
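`DataloaderDummy` in the record above is an overfitting utility: it preprocesses element 0 of the dataset once (tensorize, add a batch dimension, move to GPU) and then yields only that cached batch, so every "epoch" trains on identical data. A stripped-down CPU sketch of the same idea, with a toy dataset:

```py
# Sketch of the single-element dataloader pattern from DataloaderDummy above.
import torch
from torch.utils.data import DataLoader, TensorDataset

class SingleItemLoader(DataLoader):
    def __init__(self, dataset):
        super().__init__(dataset, batch_size=1)
        # cache item 0 once, with a leading batch dimension
        self.element = dataset[0][0].unsqueeze(0)

    def __iter__(self):
        return iter([self.element])  # always the same cached batch

    def __len__(self):
        return 1

ds = TensorDataset(torch.randn(10, 3))
loader = SingleItemLoader(ds)
for batch in loader:
    print(batch.shape)  # torch.Size([1, 3])
```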
newcastleuniversity/DISPEL
dispel/providers/generic/sensor.py
[ { "identifier": "Reading", "path": "dispel/data/core.py", "snippet": "class Reading(FlagMixIn):\n \"\"\"A data capture from an experiment.\n\n Attributes\n ----------\n evaluation\n The evaluation information for this reading\n session\n The session information for this read...
from functools import partial from typing import Iterable, List, Optional, Tuple, Union from dispel.data.core import Reading from dispel.data.levels import Level from dispel.data.measures import MeasureValueDefinitionPrototype from dispel.data.raw import ( ACCELEROMETER_COLUMNS, DEFAULT_COLUMNS, GRAVITY_COLUMNS, RawDataValueDefinition, ) from dispel.data.values import AbbreviatedValue as AV from dispel.processing.assertions import NotEmptyDataSetAssertionMixin from dispel.processing.data_set import transformation from dispel.processing.extract import ExtractMultipleStep, ExtractStep from dispel.processing.level import LevelFilterType from dispel.processing.modalities import SensorModality from dispel.processing.transform import Apply, TransformStep from dispel.providers.bdh.data import BDHReading from dispel.signal.accelerometer import ( GRAVITY_CONSTANT, apply_rotation_matrices, compute_rotation_matrices_quaternion, remove_gravity_component, remove_gravity_component_ori, ) from dispel.signal.core import ( amplitude, discretize_sampling_frequency, energy, entropy, euclidean_norm, peak, ) from dispel.signal.sensor import SENSOR_UNIT, find_zero_crossings import numpy as np import pandas as pd
16,999
for axis in "XYZ" ] + [ RawDataValueDefinition( f"gravity{axis}", f"gravity component along the {axis} axis.", data_type="float", ) for axis in "XYZ" ] + [RawDataValueDefinition("ts", "time index")] ) @staticmethod def add_gravity( accelerometer: pd.DataFrame, level: Level, gravity: Optional[pd.DataFrame] = None, ) -> pd.DataFrame: """Format gravity data to ADS format.""" if gravity is None: cols = ["x", "y", "z"] raw_acc = level.get_raw_data_set("raw_accelerometer").data accelerometer = raw_acc if level.has_raw_data_set("attitude"): ori = level.get_raw_data_set("attitude").data ori_cols = ["w", "x", "y", "z"] lin_accelerometer, gravity = remove_gravity_component_ori( accelerometer[cols].values, ori[ori_cols].values ) lin_accelerometer = pd.DataFrame(lin_accelerometer, columns=cols) gravity = pd.DataFrame(gravity, columns=cols) else: lin_accelerometer, gravity = remove_gravity_component( accelerometer[cols] ) res = pd.DataFrame( { "userAccelerationX": lin_accelerometer["x"], "userAccelerationY": lin_accelerometer["y"], "userAccelerationZ": lin_accelerometer["z"], } ) res["gravityX"] = gravity["x"] res["gravityY"] = gravity["y"] res["gravityZ"] = gravity["z"] res["ts"] = accelerometer["ts"] else: # Merging on the timestamps vs. on the indexes acc_renamed = accelerometer.rename( mapper={ "x": "userAccelerationX", "y": "userAccelerationY", "z": "userAccelerationZ", }, axis=1, ) gravity_renamed = gravity.rename( mapper={"x": "gravityX", "y": "gravityY", "z": "gravityZ"}, axis=1 ) merged = acc_renamed.merge(gravity_renamed, how="outer") merged = merged.set_index("ts") merged_sorted = merged.sort_index() merged_sorted_interpolated = merged_sorted.interpolate( method="nearest", limit_direction="both" ) res = merged_sorted_interpolated.loc[acc_renamed.ts].reset_index() return res.dropna() @staticmethod @transformation def _reformat(accelerometer: pd.DataFrame, level: Level) -> pd.DataFrame: target_cols = { f"{sensor}{axis}" for sensor in ("userAcceleration", "gravity") for axis in "XYZ" } if not target_cols.issubset(accelerometer.columns): try: return TransformUserAcceleration.add_gravity( accelerometer, level, level.get_raw_data_set("gravity").data ) except ValueError: # Happens in BDH pinch return TransformUserAcceleration.add_gravity(accelerometer, level) return accelerometer class TransformGyroscope(TransformStep): r"""Format gyroscope data to ADS format if not already the case. On ADS format, the gyroscope is synchronized with the accelerometer. Here we make sure gyroscope is synchronized with the acc data set. Parameters ---------- level_filter An optional :class:`dispel.processing.level.LevelFilter` to determine the levels to be transformed. If no filter is provided, all levels will be transformed. The ``level_filter`` also accepts :class:`str`, :class:`~dispel.data.core.LevelId`\ s and lists of either and passes them to a :class:`~dispel.processing.level.LevelFilter` for convenience. """ data_set_ids = ["acc", "gyroscope"] new_data_set_id = "gyroscope" definitions = [ RawDataValueDefinition( axis, f"Rotation speed along the {axis} axis.", data_type="float" ) for axis in "xyz" ] + [RawDataValueDefinition("ts", "time index")] @staticmethod @transformation def _synchronize_gyroscope( accelerometer: pd.DataFrame, gyroscope: pd.DataFrame, reading: Reading ) -> pd.DataFrame:
"""Generic functionality for signal processing steps.""" # Define expected sampling frequencies FREQ_20HZ = 20 FREQ_50HZ = 50 FREQ_60HZ = 60 FREQ_100HZ = 100 # SensorLog can sample at 100Hz FREQ_128HZ = 128 # APDM files are sampled at 128Hz VALID_FREQ_LIST = [FREQ_20HZ, FREQ_50HZ, FREQ_100HZ, FREQ_128HZ] class RenameColumns(TransformStep): r"""Rename and select columns of a raw data set. Parameters ---------- data_set_id The data set id of the time series to be renamed. level_filter An optional :class:`~dispel.processing.level.LevelFilter` to determine the levels to be transformed. If no filter is provided, all levels will be transformed. The ``level_filter`` also accepts :class:`str`, :class:`~dispel.data.core.LevelId`\ s and lists of either and passes them to a :class:`~dispel.processing.level.LevelFilter` for convenience. kwargs All arguments passed into this class will serve as a renaming mapping for the raw data set. """ def __init__( self, data_set_id: str, level_filter: Optional[LevelFilterType] = None, **kwargs ): def _transform_function(data: pd.DataFrame) -> pd.DataFrame: data_ = data.rename(columns=kwargs) return data_[kwargs.values()] super().__init__( data_set_id, _transform_function, f"{data_set_id}_renamed", [RawDataValueDefinition(column, column) for column in kwargs.values()], level_filter=level_filter, ) class SetTimestampIndex(TransformStep): r"""Create a new time series based on a date time or time delta column. Parameters ---------- data_set_id The data set id of the time series to be transformed. columns The columns to consider in the new raw data set. time_stamp_column The time series column name to use as index. level_filter An optional :class:`dispel.processing.level.LevelFilter` to determine the levels to be transformed. If no filter is provided, all levels will be transformed. The ``level_filter`` also accepts :class:`str`, :class:`~dispel.data.core.LevelId`\ s and lists of either and passes them to a :class:`~dispel.processing.level.LevelFilter` for convenience. duplicates The strategy used to handle duplicates. Has to be one of ``ignore``, ``raise``, ``first``, ``last``. """ def __init__( self, data_set_id: str, columns: List[str], time_stamp_column: str = "ts", level_filter: Optional[LevelFilterType] = None, duplicates: Optional[str] = None, ): def _transform_function( data: pd.DataFrame, rm_duplicate: Optional[str] ) -> pd.DataFrame: if rm_duplicate is None: return data.set_index(time_stamp_column)[columns].copy() res = data.set_index(time_stamp_column)[columns].copy() return res[~res.index.duplicated(keep=duplicates)] super().__init__( data_set_id, lambda x: _transform_function(x, duplicates), f"{data_set_id}_ts", [RawDataValueDefinition(column, column) for column in columns], level_filter=level_filter, ) class Trim(TransformStep): """Trim a sensor signal at the beginning and/or end. Parameters ---------- trim_left The amount of data to trim from the left side of the sensor readings. trim_right The amount of data to trim from the right side of the sensor readings. ts_column The column id to be used in the provided raw data set through ``data_set_ids``. If no column is provided, the data set is expected to have a time-based index that is used to trim the data set. 
""" trim_left = pd.Timedelta(0) trim_right = pd.Timedelta(0) ts_column: Optional[str] = None def __init__(self, *args, **kwargs): if (left := kwargs.pop("trim_left", None)) is not None: self.trim_left = left if (right := kwargs.pop("trim_right", None)) is not None: self.trim_right = right if (column := kwargs.pop("ts_column", None)) is not None: self.ts_column = column super().__init__(*args, **kwargs) @transformation def _trim(self, data: pd.DataFrame) -> pd.DataFrame: ts_col = data.index if self.ts_column is None else data[self.ts_column] if self.trim_left > pd.Timedelta(0): data = data[ts_col > ts_col.min() + self.trim_left] if self.trim_right > pd.Timedelta(0): data = data[ts_col < ts_col.max() - self.trim_right] return data.copy() class Resample(NotEmptyDataSetAssertionMixin, TransformStep): r"""Resample a time-based raw data set to a specific sampling frequency. The resampling creates a new raw data set which is accessible via the data set comprised of the original one concatenated with ``_resampled``. Parameters ---------- data_set_id The data set to be resampled. This has to be a data set that uses a time-based index. You might first have to apply the :class:`SetTimestampIndex` processing step before you can apply this step. aggregations A list of resampling methods to be applied in order. Each can be any method that is also accepted by :meth:`pandas.DataFrame.agg`. columns The columns to be considered during the resampling. freq The frequency to resample to. See also :meth:`pandas.DataFrame.resample` for details. If freq is not provided the frequency is estimated automatically taking the median frequency. max_frequency_distance An optional integer specifying the maximum accepted distance between the expected frequency and the estimated frequency above which we raise an error. level_filter An optional :class:`dispel.processing.level.LevelFilter` to determine the levels to be transformed. If no filter is provided, all levels will be transformed. The ``level_filter`` also accepts :class:`str`, :class:`~dispel.data.core.LevelId`\ s and lists of either and passes them to a :class:`~dispel.processing.level.LevelFilter` for convenience. 
""" def __init__( self, data_set_id: str, aggregations: Iterable[str], columns: Iterable[str], freq: Optional[Union[float, str]] = None, max_frequency_distance: Optional[int] = None, level_filter: Optional[LevelFilterType] = None, ): def _resample( data: pd.DataFrame, sampling_frequency: Optional[Union[float, str]] = None ) -> pd.DataFrame: # Check if a sampling frequency is provided # If not, we discretized the sampling frequency if sampling_frequency is None: discretize_args = [data, VALID_FREQ_LIST] if max_frequency_distance: discretize_args.append(max_frequency_distance) sampling_frequency = discretize_sampling_frequency(*discretize_args) # Convert the float sampling frequency to a Timedelta format if not isinstance(sampling_frequency, str): sampling_frequency = pd.Timedelta(1 / sampling_frequency, unit="s") resample_obj = data[columns].resample(sampling_frequency) for method in aggregations: resample_obj = resample_obj.agg(method) return resample_obj def _definition_factory(column: str) -> RawDataValueDefinition: return RawDataValueDefinition( column, f"{column} resampled with {aggregations}" ) super().__init__( data_set_id, partial(_resample, sampling_frequency=freq), f"{data_set_id}_resampled", [_definition_factory(column) for column in columns], level_filter=level_filter, ) class Upsample(Apply): r"""Upsample a time-based raw data set to a specific sampling frequency. The upsampling creates a new raw data set which is an upsampled version of the original data set identified by data_set_id. The upsampled data set is accessible via the new_data_set_id which is a concatenation of the original data_set_id and a suffix ``_upsampled``. Parameters ---------- interpolation_method Interpolation technique to use to fill NaN values. It should be a method that is also accepted by :meth:`pandas.DataFrame.interpolate`. freq The frequency to upsample to. See also :meth:`pandas.DataFrame.resample` for details. """ def get_new_data_set_id(self) -> str: """Overwrite new_data_set_id.""" return f"{self.get_data_set_ids()[0]}_upsampled" # type: ignore def __init__(self, interpolation_method: str, freq: Union[float, str], **kwargs): def _upsample( data: pd.DataFrame, sampling_frequency: Union[float, str] ) -> pd.DataFrame: """Upsample a dataframe to a given sampling frequency.""" # Convert the float sampling frequency to a Timedelta format if not isinstance(sampling_frequency, str): sampling_frequency = pd.Timedelta(1 / sampling_frequency, unit="s") resample_obj = data.resample(sampling_frequency) return resample_obj.interpolate(interpolation_method) super().__init__( method=_upsample, method_kwargs={"sampling_frequency": freq}, **kwargs ) class ExtractAverageSignalEnergy(NotEmptyDataSetAssertionMixin, ExtractStep): r"""An average signal energy extraction step. Parameters ---------- sensor The type of sensor on which the extraction is to be performed. data_set_id The data set id on which the extraction is to be performed. columns The columns onto which the signal energy is to be computed. level_filter An optional :class:`~dispel.processing.level.LevelFilter` to determine the levels to be transformed. If no filter is provided, all levels will be transformed. The ``level_filter`` also accepts :class:`str`, :class:`~dispel.data.core.LevelId`\ s and lists of either and passes them to a :class:`~dispel.processing.level.LevelFilter` for convenience. 
""" def __init__( self, sensor: SensorModality, data_set_id: str, columns: List[str], level_filter: Optional[LevelFilterType] = None, ): def _average_signal(data: pd.DataFrame): return np.linalg.norm(data[columns], ord=2) super().__init__( data_set_id, _average_signal, definition=MeasureValueDefinitionPrototype( measure_name=AV(f"average {sensor} energy", f"{sensor.abbr}_sig_ene"), data_type="float64", description=f"The average {sensor} energy of the " f'{"".join(columns)} columns of the signal.', unit=SENSOR_UNIT[sensor.abbr], ), level_filter=level_filter, ) class ExtractPowerSpectrumMeasures(NotEmptyDataSetAssertionMixin, ExtractMultipleStep): r"""A measure extraction processing step for power spectrum measures. Parameters ---------- sensor The type of sensor on which the extraction is to be performed. data_set_id The data set id on which the extraction is to be performed. columns The columns onto which the power spectrum measures are to be extracted. lower_bound The lower bound of frequencies below which the signal is filtered. upper_bound The higher bound of frequencies above which the signal is filtered. level_filter An optional :class:`~dispel.processing.level.LevelFilter` to determine the levels to be transformed. If no filter is provided, all levels will be transformed. The ``level_filter`` also accepts :class:`str`, :class:`~dispel.data.core.LevelId`\ s and lists of either and passes them to a :class:`~dispel.processing.level.LevelFilter` for convenience. """ def __init__( self, sensor: SensorModality, data_set_id: str, columns: List[str], lower_bound: Optional[float] = None, upper_bound: Optional[float] = None, level_filter: Optional[LevelFilterType] = None, ): unit = sensor.unit(order=2) atomic_functions = [ { "func": partial(energy, lowcut=lower_bound, highcut=upper_bound), "name": AV("energy", "ene"), "description": "The power spectrum energy summed between the " f"frequencies ({lower_bound}, {upper_bound}) " f"of the {{axis}} axis for the {sensor} " f"signal.", "unit": unit, "outcome_uuid": "99ef9a8d-a925-4eb0-9e80-be58cd4a9ac9", }, { "func": peak, "name": AV("peak", "peak"), "description": f"The frequency at which the power spectrum of " "the {axis} axis reaches its maximum value for " f"the {sensor} signal.", "unit": "Hz", "outcome_uuid": "87512c93-3a5b-4c9e-9575-fd9ed19649ca", }, { "func": entropy, "name": AV("entropy", "ent"), "description": "The power spectrum entropy of the {axis} axis " f"for the {sensor} signal.", "unit": unit, "outcome_uuid": "6726bb5a-8084-49f5-a53e-6a28a8f27695", }, { "func": amplitude, "name": AV("amplitude", "amp"), "description": "The power spectrum amplitude (i.e. the maximum" " value) of the {axis} axis for the " f"{sensor} signal.", "unit": unit, "outcome_uuid": "bde2c1f9-abf7-41e7-91f8-e0ddddf34a5c", }, ] def _function_factory(atomic_function, axis): return dict( func=lambda x: atomic_function["func"](x[axis]), description=atomic_function["description"].format(axis=axis), unit=atomic_function["unit"], measure_name=AV( f'{sensor} power spectrum {atomic_function["name"]} {axis}' f" axis", f'{sensor.abbr}_ps_{atomic_function["name"].abbr}_{axis}', ), ) functions = [ _function_factory(atomic_function, axis) for atomic_function in atomic_functions for axis in columns ] super().__init__( data_set_id, functions, definition=MeasureValueDefinitionPrototype(data_type="float64"), level_filter=level_filter, ) class ComputeGravityRotationMatrices(TransformStep): r"""Compute a series of rotation matrices to align sensors to gravity. 
This transformation step creates a series of rotation matrices based on the gravity information contained in the accelerometer sensor. This allows to rotate other sensors on a desired orientation related to gravity. This is in particular of interest if we want to measure physical interactions with devices around the plane perpendicular to gravity. Parameters ---------- target_gravity The target gravity vector, e.g. ``(-1, 0, 0)`` to create rotation matrices that rotate the x-axis of a device onto gravity. level_filter An optional :class:`~dispel.processing.level.LevelFilter` to determine the levels to be transformed. If no filter is provided, all levels will be transformed. The ``level_filter`` also accepts :class:`str`, :class:`~dispel.data.core.LevelId`\ s and lists of either and passes them to a :class:`~dispel.processing.level.LevelFilter` for convenience. """ def __init__( self, data_set_id: str, target_gravity: Tuple[float, float, float], **kwargs ): def _transform_function(data: pd.DataFrame) -> pd.Series: return compute_rotation_matrices_quaternion( data[GRAVITY_COLUMNS], target_gravity ) super().__init__( data_set_id, _transform_function, "gravity_rotation_matrices", [RawDataValueDefinition("rotation_matrix", "Rotation Matrix")], **kwargs, ) class RotateSensorWithGravityRotationMatrices(TransformStep): r"""Apply a series of rotation matrices to a sensor. This is a complementary step to :class:`ComputeGravityRotationMatrices` and applies the rotation matrices to the specified sensor. Parameters ---------- data_set_id The id of the sensor data set to be rotated. columns The columns of the sensor data set to be considered in the rotation. level_filter An optional :class:`~dispel.processing.level.LevelFilter` to determine the levels to be transformed. If no filter is provided, all levels will be transformed. The ``level_filter`` also accepts :class:`str`, :class:`~dispel.data.core.LevelId`\ s and lists of either and passes them to a :class:`~dispel.processing.level.LevelFilter` for convenience. Examples -------- Assuming you want to rotate the gyroscope vector onto gravity you can achieve this by chaining the following steps: .. doctest:: processing >>> from dispel.data.raw import DEFAULT_COLUMNS >>> from dispel.processing import process >>> from dispel.providers.generic.sensor import ( ... ComputeGravityRotationMatrices, ... RotateSensorWithGravityRotationMatrices ... ) >>> cols = DEFAULT_COLUMNS >>> steps = [ ... ComputeGravityRotationMatrices('accelerometer', (-1, 0, 0)), ... RotateSensorWithGravityRotationMatrices('gyroscope', cols) ... ] >>> _ = process(reading, steps) # doctest: +SKIP The results of the roation are available in the raw data set with the id ``<data_set_id>_rotated``: .. doctest:: processing :options: +NORMALIZE_WHITESPACE >>> level = reading.get_level(level_id) # doctest: +SKIP >>> level.get_raw_data_set('gyroscope').data.head() # doctest: +SKIP x y z ts 0 0.035728 -0.021515 0.014879 2020-05-04 17:31:38.574 1 -0.012046 0.005010 -0.009029 2020-05-04 17:31:38.625 2 0.006779 0.000761 -0.003253 2020-05-04 17:31:38.680 3 0.032636 -0.020272 -0.021915 2020-05-04 17:31:38.729 4 0.007495 -0.014061 0.012886 2020-05-04 17:31:38.779 >>> level.get_raw_data_set( ... 'gyroscope_rotated' ... 
).data.head() # doctest: +SKIP x y z 0 -0.002309 -0.042509 -0.012182 1 -0.003754 0.014983 0.003624 2 -0.002237 -0.002116 -0.006901 3 -0.030461 -0.021654 -0.023656 4 0.001203 -0.019580 0.005924 """ def __init__( self, data_set_id: str, columns: Iterable[str], level_filter: Optional[LevelFilterType] = None, ): def _transform_function( sensor_df: pd.DataFrame, matrices: pd.DataFrame ) -> pd.DataFrame: return apply_rotation_matrices( matrices["rotation_matrix"], sensor_df[columns] ) def _definition_factory(column: str) -> RawDataValueDefinition: return RawDataValueDefinition(column, f"{column} rotated") super().__init__( [data_set_id, "gravity_rotation_matrices"], _transform_function, f"{data_set_id}_rotated", [_definition_factory(column) for column in columns], level_filter=level_filter, ) class TransformUserAcceleration(TransformStep): r"""Format accelerometer data to ADS format if not already the case. Prior to formatting, linear acceleration and gravity are decoupled from acceleration. Parameters ---------- level_filter An optional :class:`dispel.processing.level.LevelFilter` to determine the levels to be transformed. If no filter is provided, all levels will be transformed. The ``level_filter`` also accepts :class:`str`, :class:`~dispel.data.core.LevelId`\ s and lists of either and passes them to a :class:`~dispel.processing.level.LevelFilter` for convenience. """ data_set_ids = "accelerometer" new_data_set_id = "acc" definitions = ( [ RawDataValueDefinition( f"userAcceleration{axis}", f"Linear Acceleration along the {axis} axis.", data_type="float", ) for axis in "XYZ" ] + [ RawDataValueDefinition( f"gravity{axis}", f"gravity component along the {axis} axis.", data_type="float", ) for axis in "XYZ" ] + [RawDataValueDefinition("ts", "time index")] ) @staticmethod def add_gravity( accelerometer: pd.DataFrame, level: Level, gravity: Optional[pd.DataFrame] = None, ) -> pd.DataFrame: """Format gravity data to ADS format.""" if gravity is None: cols = ["x", "y", "z"] raw_acc = level.get_raw_data_set("raw_accelerometer").data accelerometer = raw_acc if level.has_raw_data_set("attitude"): ori = level.get_raw_data_set("attitude").data ori_cols = ["w", "x", "y", "z"] lin_accelerometer, gravity = remove_gravity_component_ori( accelerometer[cols].values, ori[ori_cols].values ) lin_accelerometer = pd.DataFrame(lin_accelerometer, columns=cols) gravity = pd.DataFrame(gravity, columns=cols) else: lin_accelerometer, gravity = remove_gravity_component( accelerometer[cols] ) res = pd.DataFrame( { "userAccelerationX": lin_accelerometer["x"], "userAccelerationY": lin_accelerometer["y"], "userAccelerationZ": lin_accelerometer["z"], } ) res["gravityX"] = gravity["x"] res["gravityY"] = gravity["y"] res["gravityZ"] = gravity["z"] res["ts"] = accelerometer["ts"] else: # Merging on the timestamps vs. 
on the indexes acc_renamed = accelerometer.rename( mapper={ "x": "userAccelerationX", "y": "userAccelerationY", "z": "userAccelerationZ", }, axis=1, ) gravity_renamed = gravity.rename( mapper={"x": "gravityX", "y": "gravityY", "z": "gravityZ"}, axis=1 ) merged = acc_renamed.merge(gravity_renamed, how="outer") merged = merged.set_index("ts") merged_sorted = merged.sort_index() merged_sorted_interpolated = merged_sorted.interpolate( method="nearest", limit_direction="both" ) res = merged_sorted_interpolated.loc[acc_renamed.ts].reset_index() return res.dropna() @staticmethod @transformation def _reformat(accelerometer: pd.DataFrame, level: Level) -> pd.DataFrame: target_cols = { f"{sensor}{axis}" for sensor in ("userAcceleration", "gravity") for axis in "XYZ" } if not target_cols.issubset(accelerometer.columns): try: return TransformUserAcceleration.add_gravity( accelerometer, level, level.get_raw_data_set("gravity").data ) except ValueError: # Happens in BDH pinch return TransformUserAcceleration.add_gravity(accelerometer, level) return accelerometer class TransformGyroscope(TransformStep): r"""Format gyroscope data to ADS format if not already the case. On ADS format, the gyroscope is synchronized with the accelerometer. Here we make sure gyroscope is synchronized with the acc data set. Parameters ---------- level_filter An optional :class:`dispel.processing.level.LevelFilter` to determine the levels to be transformed. If no filter is provided, all levels will be transformed. The ``level_filter`` also accepts :class:`str`, :class:`~dispel.data.core.LevelId`\ s and lists of either and passes them to a :class:`~dispel.processing.level.LevelFilter` for convenience. """ data_set_ids = ["acc", "gyroscope"] new_data_set_id = "gyroscope" definitions = [ RawDataValueDefinition( axis, f"Rotation speed along the {axis} axis.", data_type="float" ) for axis in "xyz" ] + [RawDataValueDefinition("ts", "time index")] @staticmethod @transformation def _synchronize_gyroscope( accelerometer: pd.DataFrame, gyroscope: pd.DataFrame, reading: Reading ) -> pd.DataFrame:
if isinstance(reading, BDHReading):
16
2023-11-14 10:06:46+00:00
24k
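Aside: the Resample and ExtractAverageSignalEnergy steps in the record above reduce to two pandas/NumPy idioms. A minimal, self-contained sketch of both, assuming nothing from dispel itself (all names and data below are illustrative):

import numpy as np
import pandas as pd

# Time-indexed 3-axis sensor data at ~50 Hz
ts = pd.date_range("2020-05-04 17:31:38", periods=100, freq="20ms")
data = pd.DataFrame(np.random.randn(100, 3), index=ts, columns=["x", "y", "z"])

# Resample to 50 ms bins; a float frequency f is first mapped to
# pd.Timedelta(1 / f, unit="s"), exactly as in _resample/_upsample above.
resampled = data[["x", "y", "z"]].resample("50ms").mean()

# Average signal energy: the matrix 2-norm of the axis columns,
# mirroring _average_signal's np.linalg.norm(data[columns], ord=2).
energy = np.linalg.norm(data[["x", "y", "z"]], ord=2)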
Jisencc/yolov5_dual_weighting
segment/val.py
[ { "identifier": "DetectMultiBackend", "path": "models/common.py", "snippet": "class DetectMultiBackend(nn.Module):\n # YOLOv5 MultiBackend class for python inference on various backends\n def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=Tr...
import argparse import json import os import subprocess import sys import numpy as np import torch import torch.nn.functional as F from multiprocessing.pool import ThreadPool from pathlib import Path from tqdm import tqdm from models.common import DetectMultiBackend from models.yolo import SegmentationModel from utils.callbacks import Callbacks from utils.general import (LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size, check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args, scale_boxes, xywh2xyxy, xyxy2xywh) from utils.metrics import ConfusionMatrix, box_iou from utils.plots import output_to_target, plot_val_study from utils.segment.dataloaders import create_dataloader from utils.segment.general import mask_iou, process_mask, process_mask_native, scale_image from utils.segment.metrics import Metrics, ap_per_class_box_and_mask from utils.segment.plots import plot_images_and_masks from utils.torch_utils import de_parallel, select_device, smart_inference_mode from pycocotools.mask import encode from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval
16,967
# Load model model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size half = model.fp16 # FP16 supported on limited backends with CUDA nm = de_parallel(model).model.model[-1].nm if isinstance(model, SegmentationModel) else 32 # number of masks if engine: batch_size = model.batch_size else: device = model.device if not (pt or jit): batch_size = 1 # export.py models default to batch-size 1 LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') # Data data = check_dataset(data) # check # Configure model.eval() cuda = device.type != 'cpu' is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt') # COCO dataset nc = 1 if single_cls else int(data['nc']) # number of classes iouv = torch.linspace(0.5, 0.95, 10, device=device) # iou vector for mAP@0.5:0.95 niou = iouv.numel() # Dataloader if not training: if pt and not single_cls: # check --weights are trained on --data ncm = model.model.nc assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \ f'classes). Pass correct combination of --weights and --data that are trained together.' model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup pad, rect = (0.0, False) if task == 'speed' else (0.5, pt) # square inference for benchmarks task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images dataloader = create_dataloader(data[task], imgsz, batch_size, stride, single_cls, pad=pad, rect=rect, workers=workers, prefix=colorstr(f'{task}: '), overlap_mask=overlap, mask_downsample_ratio=mask_downsample_ratio)[0] seen = 0 confusion_matrix = ConfusionMatrix(nc=nc) names = model.names if hasattr(model, 'names') else model.module.names # get class names if isinstance(names, (list, tuple)): # old format names = dict(enumerate(names)) class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', 'R', 'mAP50', 'mAP50-95)', 'Mask(P', 'R', 'mAP50', 'mAP50-95)') dt = Profile(), Profile(), Profile() metrics = Metrics() loss = torch.zeros(4, device=device) jdict, stats = [], [] # callbacks.run('on_val_start') pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT) # progress bar for batch_i, (im, targets, paths, shapes, masks) in enumerate(pbar): # callbacks.run('on_val_batch_start') with dt[0]: if cuda: im = im.to(device, non_blocking=True) targets = targets.to(device) masks = masks.to(device) masks = masks.float() im = im.half() if half else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 nb, _, height, width = im.shape # batch size, channels, height, width # Inference with dt[1]: preds, protos, train_out = model(im) if compute_loss else (*model(im, augment=augment)[:2], None) # Loss if compute_loss: loss += compute_loss((train_out, protos), targets, masks)[1] # box, obj, cls # NMS targets[:, 2:] *= torch.tensor((width, height, width, height), device=device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling with dt[2]: preds = non_max_suppression(preds, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls, max_det=max_det, nm=nm) # Metrics plot_masks = [] # masks for plotting for si, (pred, proto) in enumerate(zip(preds, protos)): labels = targets[targets[:, 
0] == si, 1:] nl, npr = labels.shape[0], pred.shape[0] # number of labels, predictions path, shape = Path(paths[si]), shapes[si][0] correct_masks = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init correct_bboxes = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init seen += 1 if npr == 0: if nl: stats.append((correct_masks, correct_bboxes, *torch.zeros((2, 0), device=device), labels[:, 0])) if plots: confusion_matrix.process_batch(detections=None, labels=labels[:, 0]) continue # Masks midx = [si] if overlap else targets[:, 0] == si gt_masks = masks[midx] pred_masks = process(proto, pred[:, 6:], pred[:, :4], shape=im[si].shape[1:]) # Predictions if single_cls: pred[:, 5] = 0 predn = pred.clone()
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license """ Validate a trained YOLOv5 segment model on a segment dataset Usage: $ bash data/scripts/get_coco.sh --val --segments # download COCO-segments val split (1G, 5000 images) $ python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate COCO-segments Usage - formats: $ python segment/val.py --weights yolov5s-seg.pt # PyTorch yolov5s-seg.torchscript # TorchScript yolov5s-seg.onnx # ONNX Runtime or OpenCV DNN with --dnn yolov5s-seg_openvino_label # OpenVINO yolov5s-seg.engine # TensorRT yolov5s-seg.mlmodel # CoreML (macOS-only) yolov5s-seg_saved_model # TensorFlow SavedModel yolov5s-seg.pb # TensorFlow GraphDef yolov5s-seg.tflite # TensorFlow Lite yolov5s-seg_edgetpu.tflite # TensorFlow Edge TPU yolov5s-seg_paddle_model # PaddlePaddle """ FILE = Path(__file__).resolve() ROOT = FILE.parents[1] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative def save_one_txt(predn, save_conf, shape, file): # Save one txt result gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh for *xyxy, conf, cls in predn.tolist(): xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(file, 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') def save_one_json(predn, jdict, path, class_map, pred_masks): # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236} def single_encode(x): rle = encode(np.asarray(x[:, :, None], order='F', dtype='uint8'))[0] rle['counts'] = rle['counts'].decode('utf-8') return rle image_id = int(path.stem) if path.stem.isnumeric() else path.stem box = xyxy2xywh(predn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner pred_masks = np.transpose(pred_masks, (2, 0, 1)) with ThreadPool(NUM_THREADS) as pool: rles = pool.map(single_encode, pred_masks) for i, (p, b) in enumerate(zip(predn.tolist(), box.tolist())): jdict.append({ 'image_id': image_id, 'category_id': class_map[int(p[5])], 'bbox': [round(x, 3) for x in b], 'score': round(p[4], 5), 'segmentation': rles[i]}) def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, overlap=False, masks=False): """ Return correct prediction matrix Arguments: detections (array[N, 6]), x1, y1, x2, y2, conf, class labels (array[M, 5]), class, x1, y1, x2, y2 Returns: correct (array[N, 10]), for 10 IoU levels """ if masks: if overlap: nl = len(labels) index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1 gt_masks = gt_masks.repeat(nl, 1, 1) # shape(1,640,640) -> (n,640,640) gt_masks = torch.where(gt_masks == index, 1.0, 0.0) if gt_masks.shape[1:] != pred_masks.shape[1:]: gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode='bilinear', align_corners=False)[0] gt_masks = gt_masks.gt_(0.5) iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1)) else: # boxes iou = box_iou(labels[:, 1:], detections[:, :4]) correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool) correct_class = labels[:, 0:1] == detections[:, 5] for i in range(len(iouv)): x = torch.where((iou >= iouv[i]) & correct_class) # IoU > threshold and classes match if x[0].shape[0]: matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detect, iou] if x[0].shape[0] > 1: matches = 
matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 1], return_index=True)[1]] # matches = matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 0], return_index=True)[1]] correct[matches[:, 1].astype(int), i] = True return torch.tensor(correct, dtype=torch.bool, device=iouv.device) @smart_inference_mode() def run( data, weights=None, # model.pt path(s) batch_size=32, # batch size imgsz=640, # inference size (pixels) conf_thres=0.001, # confidence threshold iou_thres=0.6, # NMS IoU threshold max_det=300, # maximum detections per image task='val', # train, val, test, speed or study device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu workers=8, # max dataloader workers (per RANK in DDP mode) single_cls=False, # treat as single-class dataset augment=False, # augmented inference verbose=False, # verbose output save_txt=False, # save results to *.txt save_hybrid=False, # save label+prediction hybrid results to *.txt save_conf=False, # save confidences in --save-txt labels save_json=False, # save a COCO-JSON results file project=ROOT / 'runs/val-seg', # save to project/name name='exp', # save to project/name exist_ok=False, # existing project/name ok, do not increment half=True, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference model=None, dataloader=None, save_dir=Path(''), plots=True, overlap=False, mask_downsample_ratio=1, compute_loss=None, callbacks=Callbacks(), ): if save_json: check_requirements('pycocotools>=2.0.6') process = process_mask_native # more accurate else: process = process_mask # faster # Initialize/load model and set device training = model is not None if training: # called by train.py device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model half &= device.type != 'cpu' # half precision only supported on CUDA model.half() if half else model.float() nm = de_parallel(model).model[-1].nm # number of masks else: # called directly device = select_device(device, batch_size=batch_size) # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size half = model.fp16 # FP16 supported on limited backends with CUDA nm = de_parallel(model).model.model[-1].nm if isinstance(model, SegmentationModel) else 32 # number of masks if engine: batch_size = model.batch_size else: device = model.device if not (pt or jit): batch_size = 1 # export.py models default to batch-size 1 LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') # Data data = check_dataset(data) # check # Configure model.eval() cuda = device.type != 'cpu' is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt') # COCO dataset nc = 1 if single_cls else int(data['nc']) # number of classes iouv = torch.linspace(0.5, 0.95, 10, device=device) # iou vector for mAP@0.5:0.95 niou = iouv.numel() # Dataloader if not training: if pt and not single_cls: # check --weights are trained on --data ncm = model.model.nc assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \ f'classes). 
Pass correct combination of --weights and --data that are trained together.' model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup pad, rect = (0.0, False) if task == 'speed' else (0.5, pt) # square inference for benchmarks task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images dataloader = create_dataloader(data[task], imgsz, batch_size, stride, single_cls, pad=pad, rect=rect, workers=workers, prefix=colorstr(f'{task}: '), overlap_mask=overlap, mask_downsample_ratio=mask_downsample_ratio)[0] seen = 0 confusion_matrix = ConfusionMatrix(nc=nc) names = model.names if hasattr(model, 'names') else model.module.names # get class names if isinstance(names, (list, tuple)): # old format names = dict(enumerate(names)) class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', 'R', 'mAP50', 'mAP50-95)', 'Mask(P', 'R', 'mAP50', 'mAP50-95)') dt = Profile(), Profile(), Profile() metrics = Metrics() loss = torch.zeros(4, device=device) jdict, stats = [], [] # callbacks.run('on_val_start') pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT) # progress bar for batch_i, (im, targets, paths, shapes, masks) in enumerate(pbar): # callbacks.run('on_val_batch_start') with dt[0]: if cuda: im = im.to(device, non_blocking=True) targets = targets.to(device) masks = masks.to(device) masks = masks.float() im = im.half() if half else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 nb, _, height, width = im.shape # batch size, channels, height, width # Inference with dt[1]: preds, protos, train_out = model(im) if compute_loss else (*model(im, augment=augment)[:2], None) # Loss if compute_loss: loss += compute_loss((train_out, protos), targets, masks)[1] # box, obj, cls # NMS targets[:, 2:] *= torch.tensor((width, height, width, height), device=device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling with dt[2]: preds = non_max_suppression(preds, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls, max_det=max_det, nm=nm) # Metrics plot_masks = [] # masks for plotting for si, (pred, proto) in enumerate(zip(preds, protos)): labels = targets[targets[:, 0] == si, 1:] nl, npr = labels.shape[0], pred.shape[0] # number of labels, predictions path, shape = Path(paths[si]), shapes[si][0] correct_masks = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init correct_bboxes = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init seen += 1 if npr == 0: if nl: stats.append((correct_masks, correct_bboxes, *torch.zeros((2, 0), device=device), labels[:, 0])) if plots: confusion_matrix.process_batch(detections=None, labels=labels[:, 0]) continue # Masks midx = [si] if overlap else targets[:, 0] == si gt_masks = masks[midx] pred_masks = process(proto, pred[:, 6:], pred[:, :4], shape=im[si].shape[1:]) # Predictions if single_cls: pred[:, 5] = 0 predn = pred.clone()
scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred
3
2023-11-12 13:28:26+00:00
24k
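The process_batch function in the record above is a greedy IoU matcher shared by the box and mask branches: threshold the IoU matrix, require class agreement, sort candidate pairs by IoU, then keep at most one match per detection and per label. A compact NumPy-only restatement of that core (assumed inputs: iou as a (num_labels, num_detections) matrix, a boolean correct_class of the same shape, and iouv as the ten IoU thresholds):

import numpy as np

def match_predictions(iou, correct_class, iouv):
    correct = np.zeros((iou.shape[1], len(iouv)), dtype=bool)
    for i, thr in enumerate(iouv):
        lab, det = np.where((iou >= thr) & correct_class)
        if lab.size:
            # columns: [label_idx, det_idx, iou], sorted best-IoU first
            m = np.stack([lab, det, iou[lab, det]], axis=1)
            m = m[m[:, 2].argsort()[::-1]]
            # keep at most one match per detection, then one per label
            m = m[np.unique(m[:, 1], return_index=True)[1]]
            m = m[np.unique(m[:, 0], return_index=True)[1]]
            correct[m[:, 1].astype(int), i] = True
    return correct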
cyberark/ark-sdk-python
ark_sdk_python/cli_services/dpa/vm/ark_dpa_vm_policies_editor_service.py
[ { "identifier": "ArkInquirerRender", "path": "ark_sdk_python/args/ark_args_formatter.py", "snippet": "class ArkInquirerRender(ConsoleRender):\n # pylint: disable=keyword-arg-before-vararg,protected-access\n def __init__(self, event_generator=None, *args, **kwargs):\n super().__init__(event_...
from datetime import date, timedelta from typing import Dict, Final, List, Optional from overrides import overrides from ark_sdk_python.args.ark_args_formatter import ArkInquirerRender from ark_sdk_python.auth.ark_isp_auth import ArkISPAuth from ark_sdk_python.cli_services.dpa.common.ark_dpa_base_policies_editor_service import ArkDPABasePoliciesEditorService from ark_sdk_python.models.ark_profile import ArkProfile from ark_sdk_python.models.cli_services.dpa.policies_editor.vm import ArkDPAVMGeneratePolicy from ark_sdk_python.models.common import ArkProtocolType, ArkWorkspaceType from ark_sdk_python.models.services import ArkServiceConfig from ark_sdk_python.models.services.dpa.policies.common import ArkDPADeletePolicy, ArkDPAGetPolicy, ArkDPARuleStatus, ArkDPAUserData from ark_sdk_python.models.services.dpa.policies.vm import ( ArkDPAVMAddPolicy, ArkDPAVMAuthorizationRule, ArkDPAVMAWSProviderData, ArkDPAVMAzureProviderData, ArkDPAVMConnectionDataType, ArkDPAVMConnectionInformation, ArkDPAVMFQDNOperator, ArkDPAVMFQDNRule, ArkDPAVMFQDNRulesConjunction, ArkDPAVMGCPProviderData, ArkDPAVMLocalEphemeralUserConnectionMethodData, ArkDPAVMOnPremProviderData, ArkDPAVMPolicy, ArkDPAVMPolicyListItem, ArkDPAVMProvider, ArkDPAVMRDPLocalEphemeralUserConnectionData, ArkDPAVMUpdatePolicy, ) from ark_sdk_python.services.dpa.policies.vm.ark_dpa_vm_policies_service import ArkDPAVMPoliciesService import inquirer
14,503
SERVICE_CONFIG: Final[ArkServiceConfig] = ArkServiceConfig( service_name='dpa-policies-vm-editor', required_authenticator_names=['isp'], optional_authenticator_names=[] ) DEFAULT_GENERATED_POLICY: Final[ArkDPAVMPolicy] = ArkDPAVMPolicy( policy_name='Default VM Policy', status=ArkDPARuleStatus.Draft, description='Auto generated vm policy', providers_data={}, start_date=date.today().strftime('%Y-%m-%d'), end_date=(date.today() + timedelta(days=7)).strftime('%Y-%m-%d'), user_access_rules=[], ) DEFAULT_GENERATED_AUTHORIZATION_RULE: Final[ArkDPAVMAuthorizationRule] = ArkDPAVMAuthorizationRule( rule_name='Default VM Rule', user_data=ArkDPAUserData(roles=['DpaAdmin'], groups=[], users=[]), connection_information=ArkDPAVMConnectionInformation( connect_as={}, grant_access=2, idle_time=10, days_of_week=[], full_days=True, hours_from='07:00', hours_to='17:00', time_zone='Asia/Jerusalem', ), ) DEFAULT_GENERATED_PROVIDERS: Final[Dict[ArkWorkspaceType, ArkDPAVMProvider]] = { ArkWorkspaceType.AWS: ArkDPAVMAWSProviderData(regions=[], tags=[{'key': 'value'}], vpc_ids=[], account_ids=[]), ArkWorkspaceType.AZURE: ArkDPAVMAzureProviderData( regions=[], tags=[{'key': 'value'}], resource_groups=[], vnet_ids=[], subscriptions=[] ), ArkWorkspaceType.GCP: ArkDPAVMGCPProviderData(regions=[], tags=[{'key': 'value'}], network_ids=[], projects=[]), ArkWorkspaceType.ONPREM: ArkDPAVMOnPremProviderData( fqdn_rules_conjunction=ArkDPAVMFQDNRulesConjunction.OR, fqdn_rules=[ArkDPAVMFQDNRule(operator=ArkDPAVMFQDNOperator.WILDCARD, computername_pattern='*', domain='default.com')], ), } DEFAULT_GENERATED_PROTOCOLS: Final[Dict[ArkProtocolType, ArkDPAVMConnectionDataType]] = { ArkProtocolType.SSH: 'root', ArkProtocolType.RDP: ArkDPAVMRDPLocalEphemeralUserConnectionData( local_ephemeral_user=ArkDPAVMLocalEphemeralUserConnectionMethodData(assign_groups={'Administrators'}) ), } SUPPORTED_SSH_PROTOCOL_PROVIDERS: Final[ArkWorkspaceType] = [ ArkWorkspaceType.AWS, ArkWorkspaceType.AZURE, ArkWorkspaceType.GCP, ArkWorkspaceType.ONPREM, ] SUPPORTED_RDP_PROTOCOL_PROVIDERS: Final[ArkWorkspaceType] = [ArkWorkspaceType.AWS, ArkWorkspaceType.AZURE, ArkWorkspaceType.ONPREM] class ArkDPAVMPoliciesEditorService( ArkDPABasePoliciesEditorService[ArkDPAVMPolicy, ArkDPAVMPolicyListItem, ArkDPAVMAddPolicy, ArkDPAVMUpdatePolicy, ArkDPAVMGeneratePolicy] ):
SERVICE_CONFIG: Final[ArkServiceConfig] = ArkServiceConfig( service_name='dpa-policies-vm-editor', required_authenticator_names=['isp'], optional_authenticator_names=[] ) DEFAULT_GENERATED_POLICY: Final[ArkDPAVMPolicy] = ArkDPAVMPolicy( policy_name='Default VM Policy', status=ArkDPARuleStatus.Draft, description='Auto generated vm policy', providers_data={}, start_date=date.today().strftime('%Y-%m-%d'), end_date=(date.today() + timedelta(days=7)).strftime('%Y-%m-%d'), user_access_rules=[], ) DEFAULT_GENERATED_AUTHORIZATION_RULE: Final[ArkDPAVMAuthorizationRule] = ArkDPAVMAuthorizationRule( rule_name='Default VM Rule', user_data=ArkDPAUserData(roles=['DpaAdmin'], groups=[], users=[]), connection_information=ArkDPAVMConnectionInformation( connect_as={}, grant_access=2, idle_time=10, days_of_week=[], full_days=True, hours_from='07:00', hours_to='17:00', time_zone='Asia/Jerusalem', ), ) DEFAULT_GENERATED_PROVIDERS: Final[Dict[ArkWorkspaceType, ArkDPAVMProvider]] = { ArkWorkspaceType.AWS: ArkDPAVMAWSProviderData(regions=[], tags=[{'key': 'value'}], vpc_ids=[], account_ids=[]), ArkWorkspaceType.AZURE: ArkDPAVMAzureProviderData( regions=[], tags=[{'key': 'value'}], resource_groups=[], vnet_ids=[], subscriptions=[] ), ArkWorkspaceType.GCP: ArkDPAVMGCPProviderData(regions=[], tags=[{'key': 'value'}], network_ids=[], projects=[]), ArkWorkspaceType.ONPREM: ArkDPAVMOnPremProviderData( fqdn_rules_conjunction=ArkDPAVMFQDNRulesConjunction.OR, fqdn_rules=[ArkDPAVMFQDNRule(operator=ArkDPAVMFQDNOperator.WILDCARD, computername_pattern='*', domain='default.com')], ), } DEFAULT_GENERATED_PROTOCOLS: Final[Dict[ArkProtocolType, ArkDPAVMConnectionDataType]] = { ArkProtocolType.SSH: 'root', ArkProtocolType.RDP: ArkDPAVMRDPLocalEphemeralUserConnectionData( local_ephemeral_user=ArkDPAVMLocalEphemeralUserConnectionMethodData(assign_groups={'Administrators'}) ), } SUPPORTED_SSH_PROTOCOL_PROVIDERS: Final[ArkWorkspaceType] = [ ArkWorkspaceType.AWS, ArkWorkspaceType.AZURE, ArkWorkspaceType.GCP, ArkWorkspaceType.ONPREM, ] SUPPORTED_RDP_PROTOCOL_PROVIDERS: Final[ArkWorkspaceType] = [ArkWorkspaceType.AWS, ArkWorkspaceType.AZURE, ArkWorkspaceType.ONPREM] class ArkDPAVMPoliciesEditorService( ArkDPABasePoliciesEditorService[ArkDPAVMPolicy, ArkDPAVMPolicyListItem, ArkDPAVMAddPolicy, ArkDPAVMUpdatePolicy, ArkDPAVMGeneratePolicy] ):
def __init__(self, isp_auth: ArkISPAuth, policies_cache_dir: Optional[str] = None, profile: Optional[ArkProfile] = None) -> None:
1
2023-11-13 09:24:31+00:00
24k
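One small idiom worth noting in the defaults above: the generated policy's validity window is a plain one-week span built from the standard library. An illustrative restatement:

from datetime import date, timedelta

start = date.today().strftime('%Y-%m-%d')
end = (date.today() + timedelta(days=7)).strftime('%Y-%m-%d')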
i-super/Saleor
saleor/graphql/plugins/dataloaders.py
[ { "identifier": "PluginsManager", "path": "saleor/plugins/manager.py", "snippet": "class PluginsManager(PaymentInterface):\n \"\"\"Base manager for handling plugins logic.\"\"\"\n\n plugins_per_channel: dict[str, list[\"BasePlugin\"]] = {}\n global_plugins: list[\"BasePlugin\"] = []\n all_pl...
from collections import defaultdict from functools import partial, wraps from promise import Promise from ...plugins.manager import PluginsManager, get_plugins_manager from ...plugins.models import EmailTemplate from ..app.dataloaders import get_app_promise from ..core import SaleorContext from ..core.dataloaders import DataLoader
17,359
class EmailTemplatesByPluginConfigurationLoader(DataLoader): """Loads email templates by plugin configuration ID.""" context_key = "email_template_by_plugin_configuration" def batch_load(self, keys): email_templates = EmailTemplate.objects.using( self.database_connection_name ).filter(plugin_configuration_id__in=keys) config_to_template = defaultdict(list) for et in email_templates: config_to_template[et.plugin_configuration_id].append(et) return [config_to_template[key] for key in keys] class PluginManagerByRequestorDataloader(DataLoader): context_key = "plugin_manager_by_requestor" def batch_load(self, keys): allow_replica = getattr(self.context, "allow_replica", True)
class EmailTemplatesByPluginConfigurationLoader(DataLoader): """Loads email templates by plugin configuration ID.""" context_key = "email_template_by_plugin_configuration" def batch_load(self, keys): email_templates = EmailTemplate.objects.using( self.database_connection_name ).filter(plugin_configuration_id__in=keys) config_to_template = defaultdict(list) for et in email_templates: config_to_template[et.plugin_configuration_id].append(et) return [config_to_template[key] for key in keys] class PluginManagerByRequestorDataloader(DataLoader): context_key = "plugin_manager_by_requestor" def batch_load(self, keys): allow_replica = getattr(self.context, "allow_replica", True)
return [get_plugins_manager(lambda: key, allow_replica) for key in keys]
1
2023-11-13 05:00:35+00:00
24k
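EmailTemplatesByPluginConfigurationLoader above follows the standard dataloader batching contract: one query for the whole key batch, then results regrouped per key in input order. A stripped-down sketch of that contract, where Row and fetch_rows are stand-ins for the ORM model and query:

from collections import defaultdict, namedtuple

def batch_load_templates(keys, fetch_rows):
    # fetch_rows(keys) stands in for the single
    # EmailTemplate.objects.filter(plugin_configuration_id__in=keys) query.
    by_key = defaultdict(list)
    for row in fetch_rows(keys):
        by_key[row.plugin_configuration_id].append(row)
    # One entry per key, aligned with input order (empty list if no rows).
    return [by_key[key] for key in keys]

Row = namedtuple("Row", "plugin_configuration_id name")
rows = [Row(1, "a"), Row(2, "b"), Row(1, "c")]
assert batch_load_templates([1, 2, 3], lambda keys: rows) == \
    [[rows[0], rows[2]], [rows[1]], []]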
kampta/asic
train.py
[ { "identifier": "Logger", "path": "commons/logger.py", "snippet": "class Logger(SummaryWriter):\n\n def __init__(self, results_path, log_to_tb=False, log_to_wandb=True):\n super().__init__(results_path)\n self.results_path = results_path\n self.log_to_tb = log_to_tb\n self...
import argparse import torch import numpy as np import json import os import torch.nn.functional as F import wandb from torch import nn, optim from tqdm import tqdm from pathlib import Path from commons.logger import Logger, log_visuals from commons.distributed import get_rank, setup_distributed, reduce_loss_dict,\ get_world_size, primary from commons.utils import sample_tuples from datasets.cub import CUBDataset from datasets.in_memory import InMemoryDataset from datasets.spair import SpairDataset from datasets.utils import Augmentor from models.utils import accumulate, requires_grad from models.canonical import Canonical, CanonicalMLP from models.asic import Asic from losses.reg_losses import total_variation_loss from thirdparty.lpips.lpips import get_perceptual_loss from losses.matching_losses import LossCorrsSparse from thirdparty.gangealing.annealing import DecayingCosineAnnealingWarmRestarts,\ lr_cycle_iters
17,457
N = args.batch pairs = sample_tuples(len(train_dset), count=N // 2) src_idx, trg_idx = pairs[:, 0], pairs[:, 1] all_idx = np.concatenate([src_idx, trg_idx]) batch_imgs = all_imgs[all_idx] batch_parts = all_parts[all_idx] if args.use_nbb_parts: batch_masks = (batch_parts != num_parts).unsqueeze(1).float() batch_masks_resized = resize_fn(batch_masks) else: batch_masks = all_masks[all_idx] batch_masks_resized = resize_fn(batch_masks) kp1 = pseudo_kps[src_idx, trg_idx][:, :loss_topk] # (N/2, K, 4) kp2 = pseudo_kps[trg_idx, src_idx][:, :loss_topk] # (N/2, K, 4) batch_kps_vis = kp1[..., 2] > 0 # (N/2, K) batch_kps_wt = torch.ones_like(batch_kps_vis).float() # (N/2, K) batch_kps = torch.cat([kp1, kp2])[..., :2] # (N, K, 2) if args.use_nbb_parts: nbb_parts_vis = (kp1[..., 3] != args.num_parts) * (kp2[..., 3] != args.num_parts) batch_kps_wt *= nbb_parts_vis # Map the images to the canonical space flow, delta_flow = stn(batch_imgs) unwarped = canon.unwarp(flow, args.unwarp_size) # NBB weight if args.nbb_weight > 0.: nbb_loss = nbb_loss_fn(flow[:N//2], flow[N//2:], batch_kps[:N//2], batch_kps[N//2:], batch_kps_vis, batch_kps_wt) if args.equi_weight > 0.: # Apply tps transformations if args.disable_tps: batch_imgs_t = aug.forward_geom(aug.forward_color(batch_imgs)) batch_masks_t = aug.forward_geom(batch_masks, fixed=True) # Apply tps to flow flow_tf = aug.forward_geom(flow.permute(0, 3, 1, 2), fixed=True).permute(0, 2, 3, 1) else: batch_imgs_t = aug.forward_tps(aug.forward_color(batch_imgs)) batch_masks_t = aug.forward_tps(batch_masks, fixed=True) # Apply tps to flow flow_tf = aug.forward_tps(flow.permute(0, 3, 1, 2), fixed=True).permute(0, 2, 3, 1) batch_masks_t = torch.where(batch_masks_t > 0.5, 1., 0.) batch_masks_t_resized = resize_fn(batch_masks_t) vis = batch_masks_t * batch_masks # Flow of tps image flow_ft, _ = stn(batch_imgs_t) unwarped_ft = canon.unwarp(flow_ft, args.unwarp_size) equi_loss = F.l1_loss(flow_ft, flow_tf.detach(), reduction='none') \ + F.l1_loss(flow_tf, flow_ft.detach(), reduction='none') equi_loss = (equi_loss * vis.squeeze(1).unsqueeze(-1)).mean() if args.mask_weight > 0: unwarped_mask = unwarped[:, [3]] mask_loss = F.binary_cross_entropy_with_logits(unwarped_mask, batch_masks_resized) if args.equi_weight > 0.: unwarped_ft_mask = unwarped_ft[:, [3]] mask_loss = 0.5 * mask_loss + \ 0.5 * F.binary_cross_entropy_with_logits( unwarped_ft_mask, batch_masks_t_resized) # Get Total Variation Loss on flow if args.flow_tv_weight > 0: flow_tv_loss = total_variation_loss(delta_flow) # Reconstruction loss if args.rec_weight > 0: unwarped = unwarped * batch_masks_resized resized_img = resize_fn(batch_imgs) * batch_masks_resized rec_loss = loss_fn(unwarped[:, :3], resized_img).mean() if args.equi_weight > 0.: unwarped_ft = unwarped_ft * batch_masks_t_resized resized_img = resize_fn(batch_imgs_t) * batch_masks_t_resized rec_loss = 0.5*rec_loss + 0.5 * loss_fn(unwarped_ft[:, :3], resized_img).mean() # Parts Loss if args.parts_weight > 0.: # Calculate the centroid of each part part_centroids = torch.zeros(num_parts+1, 2, dtype=torch.float, device=device) part_centroids.index_add_(0, batch_parts.reshape(-1), flow.reshape(-1, 2)) part_counts = torch.bincount(batch_parts.reshape(-1)).float() part_centroids = (part_centroids/part_counts.unsqueeze(-1)).detach() # Compute the loss as the distance of the centroid from the flows parts_loss = F.l1_loss(flow, part_centroids[batch_parts], reduction='none') parts_loss = (parts_loss * batch_masks.squeeze(1).unsqueeze(-1)).mean() loss_dict = {"p": 
rec_loss, "ftv": flow_tv_loss, "nbb": nbb_loss, "equi": equi_loss, "mask": mask_loss, 'parts': parts_loss} canon.zero_grad() stn.zero_grad() full_stn_loss = args.rec_weight * rec_loss + \ args.flow_tv_weight * flow_tv_loss + \ args.nbb_weight * nbb_loss + args.equi_weight * equi_loss + \ args.mask_weight * mask_loss + args.parts_weight * parts_loss full_stn_loss.backward() t_optim.step() epoch = max(0, i / args.period) t_sched.step(epoch) if args.canon_lr > 0: canon_optim.step() canon_sched.step(epoch) if args.stn_ema:
def save_state_dict(ckpt_name, c_module, t_module, c_ema, t_ema, canon_optim, canon_sched, t_optim, t_sched, args, step, add_step_to_name=False): ckpt_dict = { "canon": c_module.state_dict(), "t": t_module.state_dict(), "c_ema": c_ema.state_dict(), "t_ema": t_ema.state_dict(), "t_optim": t_optim.state_dict(), "t_sched": t_sched.state_dict(), "canon_optim": canon_optim.state_dict() if canon_optim is not None else None, "canon_sched": canon_sched.state_dict() if canon_sched is not None else None, "args": args, "iter": step } torch.save(ckpt_dict, f'{results_path}/{ckpt_name}.pt') if add_step_to_name: torch.save(ckpt_dict, f'{results_path}/{ckpt_name}_{step:07d}.pt') def count_parameters(model): return sum(p.numel() for p in model.parameters() if p.requires_grad) def base_training_argparse(): parser = argparse.ArgumentParser(description="Training") # Main training arguments: parser.add_argument("--exp-name", type=str, required=True, help="Name for experiment run (used for logging)") parser.add_argument("--results", type=str, default='logs', help='path to the results directory') parser.add_argument("--seed", default=0, type=int, help='Random seed for this experiment') parser.add_argument("--dset", type=str, default='cub', choices=["cub", "spair"]) parser.add_argument("--img_dir", type=str, required=True, help="Path to real data") parser.add_argument("--flow_dir", type=str, default='processed_data', help="Path to preprocessed flows") parser.add_argument("--mask_threshold", type=int, default=1, help="Threshold for masking") parser.add_argument("--mask_bbox_pad", type=int, default=4, help="Crop with some padding") parser.add_argument("--img_size", default=256, type=int, help='resolution of real images') parser.add_argument("--iter", type=int, default=20000, help="total training iterations") parser.add_argument("--batch", type=int, default=20, help="batch size per-GPU") parser.add_argument("--num_workers", type=int, default=2, help="num workers for dataloader") # Dataset hyperparameters: parser.add_argument("--cub_idx", type=int, default=1, help="cub category") parser.add_argument("--split", default='test', choices=['test', 'val'], help='splits for training and validation') parser.add_argument("--use_coseg_masks", action='store_true') parser.add_argument("--num_parts", default=4, type=int) parser.add_argument("--spair_cat", default='cat', help="cub category") # Loss hyperparameters: parser.add_argument("--loss_fn", type=str, default='vgg_ssl', choices=['lpips', 'vgg_ssl'], help="The perceptual loss to use.") parser.add_argument("--rec_weight", type=float, default=1., help='weight for reconstruction loss') parser.add_argument("--nbb_weight", type=float, default=30., help='weight for nbb loss') parser.add_argument("--flow_tv_weight", default=15000.0, type=float, help="""Loss weighting of the Total Variation smoothness regularizer on the residual flow""") parser.add_argument("--equi_weight", default=1.0, type=float, help='Loss weighting for equivariance') parser.add_argument("--sparse_topk", type=int, default=None, help='number of sparse correspondences for loss') parser.add_argument("--sparse_temp", type=float, default=1, help='temperature for sparse loss') parser.add_argument("--mask_weight", default=0.1, type=float, help="""Loss weighting of the mask""") parser.add_argument("--parts_weight", default=10.0, type=float, help="""Loss weighting of the Parts Mask""") parser.add_argument("--use_nbb_parts", action='store_true') # Augmentation hyperparameters parser.add_argument("--jitter", default=[0.4, 
0.4, 0.2, 0.1], type=float, nargs='+', help='augmentation mode') parser.add_argument("--jitter_prob", default=0.8, type=float) parser.add_argument("--gray_prob", default=0.2, type=float) parser.add_argument("--solar_prob", default=0.2, type=float) parser.add_argument("--tps_scale", default=0.4, type=float) # Canonical space parser.add_argument("--unwarp_size", type=int, default=128, help="resolution for unwarping") # Learned Grid hyperparameters parser.add_argument("--canon_size", type=int, default=256, help="resolution of canonical space") parser.add_argument("--clamp", action='store_true', help="clamp values of canonical space (-1, 1)") # MLP Hyperparams parser.add_argument("--use_mlp", action='store_true') parser.add_argument("--mlp_hidden_dim", type=int, default=256, help="number of hidden units per layer") parser.add_argument("--mlp_num_layers", type=int, default=8, help="number of layers") parser.add_argument("--mlp_skip_layers", type=int, nargs='+', default=[4, 7], help="skip layers") # Model hyperparameters: parser.add_argument("--canon_lr", type=float, default=0.003, help="base learning rate of canonical space") parser.add_argument("--canon_ema", action='store_true', help='Enable ema for canonical space') parser.add_argument("--stn_ema", action='store_true', help='Enable ema for canonical space') parser.add_argument("--stn_lr", type=float, default=0.003, help="base learning rate of SpatialTransformer") parser.add_argument("--flow_ssl", action='store_true', help="""If specified, apply STN on SSL features)""") parser.add_argument("--channel_multiplier", default=0.5, type=float, help='channel multiplier for smaller models') parser.add_argument("--bilinear", action='store_true', help='Apply bilinear upsample/downsample') parser.add_argument("--padding_mode", default='border', choices=['border', 'zeros', 'reflection'], type=str, help="""Padding algorithm for when the STN samples beyond image boundaries""") parser.add_argument("--use_tanh", action='store_true', help='Use tanh activation at the flow output') parser.add_argument("--disable_tps", action='store_true', help='disable tps transformations') # Backbone parameters parser.add_argument("--bb", default='dino_vits8', choices=['dino_vits8', 'dino_vits16', 'dino_vitb8', 'dino_vitb16', 'vit_small_patch8_224', 'vit_small_patch16_224', 'vit_base_patch16_224'], help='backbone models') parser.add_argument('--bb_stride', default=2, type=int, help="stride.") # Visualization hyperparameters: parser.add_argument("--vis_every", type=int, default=500, help="""frequency with which visualizations are generated during training""") parser.add_argument("--vis_denseres", type=int, default=32, help='number of sparse correspondences to visualize') parser.add_argument("--ckpt_every", type=int, default=10000, help='frequency of checkpointing during training') parser.add_argument("--log_every", default=25, type=int, help='How frequently to log data to TensorBoard') parser.add_argument("--n_sample", type=int, default=4, help="""number of images (real and fake) to generate visuals for""") parser.add_argument("--disable_wandb", action='store_true', help='Disable wandb for debugging') # Learning Rate scheduler hyperparameters: parser.add_argument("--period", default=10000, type=float, help="""Period for cosine learning rate scheduler (measured in gradient steps)""") parser.add_argument("--decay", default=0.9, type=float, help="""Decay factor for the cosine learning rate scheduler""") parser.add_argument("--tm", default=2, type=int, help="""Period multiplier for 
the cosine learning rate scheduler""") return parser def train(args, train_dset, canon, stn, c_ema, t_ema, canon_optim, canon_sched, t_optim, t_sched, loss_fn, nbb_loss_fn, device, writer): # Record modules to make saving checkpoints easier: if args.distributed: t_module = stn.module c_module = canon.module else: t_module = stn c_module = canon # Initialize Spatial Transformation Generator (Thin Plate Spline) aug = Augmentor(jitter=args.jitter, jitter_prob=args.jitter_prob, gray_prob=args.gray_prob, solar_prob=args.solar_prob, tps_scale=args.tps_scale).to(device) # A model checkpoint will be saved whenever the learning rate is zero: zero_lr_iters = lr_cycle_iters(0, args.period, args.iter, args.tm) early_ckpt_iters = set(zero_lr_iters) early_vis_iters = {100} early_vis_iters.update(early_ckpt_iters) # Initialize various training variables and constants: rec_loss = torch.tensor(0.0, device='cuda') flow_tv_loss = torch.tensor(0.0, device='cuda') nbb_loss = torch.tensor(0.0, device='cuda') equi_loss = torch.tensor(0.0, device='cuda') mask_loss = torch.tensor(0.0, device='cuda') parts_loss = torch.tensor(0.0, device='cuda') accum = 0.5 ** (32 / (10 * 1000)) # Resize function for perceptual loss if args.unwarp_size != args.img_size: scale_factor = args.unwarp_size / args.img_size resize_fn = nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=True) else: resize_fn = nn.Identity() # Pre-load on GPU # Assuming ~30 images of size 256x256, takes up ~23 MB device memory has_gt_kp = train_dset.kps is not None all_imgs = train_dset.imgs = train_dset.imgs.to(device) # / 127.5 - 1.0 all_masks = train_dset.masks = train_dset.masks.unsqueeze(1).to(device) all_parts = train_dset.parts = train_dset.parts.to(device) if has_gt_kp: all_kps = train_dset.kps = train_dset.kps.to(device) # Pseudo GT pseudo_kps = train_dset.pseudo_kps = torch.from_numpy(train_dset.pseudo_kps).to(device) num_parts = train_dset.num_parts loss_topk = pseudo_kps.shape[2] if args.sparse_topk is None else min(args.sparse_topk, pseudo_kps.shape[2]) # Progress bar for monitoring training: pbar = range(args.start_iter, args.iter) if primary(): pbar = tqdm(pbar, initial=args.start_iter, dynamic_ncols=True, smoothing=0.2) pck_pairs, pck_cycles = log_visuals( c_ema, t_ema, train_dset, 0, writer, vis_sample=args.n_sample, vis_denseres=args.vis_denseres) best_pck_pairs = pck_pairs best_pck_cycles = pck_cycles requires_grad(stn, True) requires_grad(canon, True) for idx in pbar: # main training loop i = idx + args.start_iter + 1 #################################### # TRAIN STN and CANON # #################################### N = args.batch pairs = sample_tuples(len(train_dset), count=N // 2) src_idx, trg_idx = pairs[:, 0], pairs[:, 1] all_idx = np.concatenate([src_idx, trg_idx]) batch_imgs = all_imgs[all_idx] batch_parts = all_parts[all_idx] if args.use_nbb_parts: batch_masks = (batch_parts != num_parts).unsqueeze(1).float() batch_masks_resized = resize_fn(batch_masks) else: batch_masks = all_masks[all_idx] batch_masks_resized = resize_fn(batch_masks) kp1 = pseudo_kps[src_idx, trg_idx][:, :loss_topk] # (N/2, K, 4) kp2 = pseudo_kps[trg_idx, src_idx][:, :loss_topk] # (N/2, K, 4) batch_kps_vis = kp1[..., 2] > 0 # (N/2, K) batch_kps_wt = torch.ones_like(batch_kps_vis).float() # (N/2, K) batch_kps = torch.cat([kp1, kp2])[..., :2] # (N, K, 2) if args.use_nbb_parts: nbb_parts_vis = (kp1[..., 3] != args.num_parts) * (kp2[..., 3] != args.num_parts) batch_kps_wt *= nbb_parts_vis # Map the images to the canonical space flow, delta_flow = 
stn(batch_imgs) unwarped = canon.unwarp(flow, args.unwarp_size) # NBB weight if args.nbb_weight > 0.: nbb_loss = nbb_loss_fn(flow[:N//2], flow[N//2:], batch_kps[:N//2], batch_kps[N//2:], batch_kps_vis, batch_kps_wt) if args.equi_weight > 0.: # Apply tps transformations if args.disable_tps: batch_imgs_t = aug.forward_geom(aug.forward_color(batch_imgs)) batch_masks_t = aug.forward_geom(batch_masks, fixed=True) # Apply tps to flow flow_tf = aug.forward_geom(flow.permute(0, 3, 1, 2), fixed=True).permute(0, 2, 3, 1) else: batch_imgs_t = aug.forward_tps(aug.forward_color(batch_imgs)) batch_masks_t = aug.forward_tps(batch_masks, fixed=True) # Apply tps to flow flow_tf = aug.forward_tps(flow.permute(0, 3, 1, 2), fixed=True).permute(0, 2, 3, 1) batch_masks_t = torch.where(batch_masks_t > 0.5, 1., 0.) batch_masks_t_resized = resize_fn(batch_masks_t) vis = batch_masks_t * batch_masks # Flow of tps image flow_ft, _ = stn(batch_imgs_t) unwarped_ft = canon.unwarp(flow_ft, args.unwarp_size) equi_loss = F.l1_loss(flow_ft, flow_tf.detach(), reduction='none') \ + F.l1_loss(flow_tf, flow_ft.detach(), reduction='none') equi_loss = (equi_loss * vis.squeeze(1).unsqueeze(-1)).mean() if args.mask_weight > 0: unwarped_mask = unwarped[:, [3]] mask_loss = F.binary_cross_entropy_with_logits(unwarped_mask, batch_masks_resized) if args.equi_weight > 0.: unwarped_ft_mask = unwarped_ft[:, [3]] mask_loss = 0.5 * mask_loss + \ 0.5 * F.binary_cross_entropy_with_logits( unwarped_ft_mask, batch_masks_t_resized) # Get Total Variation Loss on flow if args.flow_tv_weight > 0: flow_tv_loss = total_variation_loss(delta_flow) # Reconstruction loss if args.rec_weight > 0: unwarped = unwarped * batch_masks_resized resized_img = resize_fn(batch_imgs) * batch_masks_resized rec_loss = loss_fn(unwarped[:, :3], resized_img).mean() if args.equi_weight > 0.: unwarped_ft = unwarped_ft * batch_masks_t_resized resized_img = resize_fn(batch_imgs_t) * batch_masks_t_resized rec_loss = 0.5*rec_loss + 0.5 * loss_fn(unwarped_ft[:, :3], resized_img).mean() # Parts Loss if args.parts_weight > 0.: # Calculate the centroid of each part part_centroids = torch.zeros(num_parts+1, 2, dtype=torch.float, device=device) part_centroids.index_add_(0, batch_parts.reshape(-1), flow.reshape(-1, 2)) part_counts = torch.bincount(batch_parts.reshape(-1)).float() part_centroids = (part_centroids/part_counts.unsqueeze(-1)).detach() # Compute the loss as the distance of the centroid from the flows parts_loss = F.l1_loss(flow, part_centroids[batch_parts], reduction='none') parts_loss = (parts_loss * batch_masks.squeeze(1).unsqueeze(-1)).mean() loss_dict = {"p": rec_loss, "ftv": flow_tv_loss, "nbb": nbb_loss, "equi": equi_loss, "mask": mask_loss, 'parts': parts_loss} canon.zero_grad() stn.zero_grad() full_stn_loss = args.rec_weight * rec_loss + \ args.flow_tv_weight * flow_tv_loss + \ args.nbb_weight * nbb_loss + args.equi_weight * equi_loss + \ args.mask_weight * mask_loss + args.parts_weight * parts_loss full_stn_loss.backward() t_optim.step() epoch = max(0, i / args.period) t_sched.step(epoch) if args.canon_lr > 0: canon_optim.step() canon_sched.step(epoch) if args.stn_ema:
accumulate(t_ema, t_module, accum)
12
2023-11-14 16:43:16+00:00
24k
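The parts loss in the training loop above uses a vectorised centroid computation: index_add_ sums flow vectors per part id, bincount counts pixels per id, and each pixel's flow is then pulled toward its part centroid. The same computation isolated with made-up shapes:

import torch
import torch.nn.functional as F

num_parts = 4
flow = torch.randn(2, 8, 8, 2)                       # (B, H, W, 2) flow field
parts = torch.randint(0, num_parts + 1, (2, 8, 8))   # per-pixel part ids

centroids = torch.zeros(num_parts + 1, 2)
centroids.index_add_(0, parts.reshape(-1), flow.reshape(-1, 2))
counts = torch.bincount(parts.reshape(-1), minlength=num_parts + 1).float()
# clamp guards empty ids; the original assumes every id occurs in the batch
centroids = (centroids / counts.clamp(min=1).unsqueeze(-1)).detach()

# L1 distance of each flow vector to the centroid of its part id
parts_loss = F.l1_loss(flow, centroids[parts], reduction="none").mean()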
tyang816/ProtSSN
src/data.py
[ { "identifier": "CathDataset", "path": "src/dataset/cath_dataset.py", "snippet": "class CathDataset(InMemoryDataset):\n r\"\"\"\n Args:\n root (string): Root directory where the dataset should be saved.\n name (string): The name of the dataset.\n raw_dir (string, optional): Ro...
import os, sys import argparse from src.dataset.cath_dataset import CathDataset from src.dataset.mutant_dataset import MutantDataset from src.utils.dataset_utils import NormalizeProtein
17,167
# set path current_dir = os.getcwd() sys.path.append(current_dir) def build_cath_dataset(args, split):
# set path current_dir = os.getcwd() sys.path.append(current_dir) def build_cath_dataset(args, split):
dataset = CathDataset(
0
2023-11-10 07:21:37+00:00
24k
atlantic-quantum/Shipyard
tests/printers/visualizer/test_visualize_pulse_sequences.py
[ { "identifier": "CoreType", "path": "shipyard/awg_core/awg_core.py", "snippet": "class CoreType(Enum):\n \"\"\"Enumeration of AWG Core types\"\"\"\n\n HD = \"HD\"\n QA = \"QA\"\n SG = \"SG\"" }, { "identifier": "ActivationRecord", "path": "shipyard/call_stack.py", "snippet": ...
import codecs import json import numpy as np import pytest from pathlib import Path from shipyard.awg_core.awg_core import CoreType from shipyard.call_stack import ActivationRecord, ARType from shipyard.compiler import Compiler from shipyard.duration import Duration, TimeUnits from shipyard.passes.duration_transformer import DurationTransformer from shipyard.passes.resolve_io_declaration import ResolveIODeclaration from shipyard.passes.semantic_analysis.semantic_analyzer import SemanticAnalyzer from shipyard.printers.visualizer.visualize_pulse_sequence import PulseVisualizer from shipyard.printers.zi import waveform_functions from shipyard.setup.internal import Frame, Instrument, Port, SetupInternal
17,463
final_call_stack = { "nested_subroutines": {"dummy": 16}, "complex_arrays": { "dummy": 4, "two_d": [[1, 2], [3, 4], [5, 6]], "my_arr": [complex(1, 0), complex(0, 1), complex(0.8, 0.6)], "second": [1, 2, 3, 4], }, } def files() -> list[str]: base_path = Path(__file__).parent.parent.parent / "qasm/visualize_pulse" plen = len(base_path.parts) FILES = list(base_path.glob("**/*.qasm")) return [str(Path(*path.parts[plen:])) for path in FILES] QASM_FILES = files() def common_files() -> list[str]: files = [] cut = -5 for q_file in QASM_FILES: files.append(q_file[:cut]) return files COMMON_FILES = common_files() @pytest.fixture(name="basic_setup") def fixture_basic_setup() -> SetupInternal: json_path = Path(__file__).parent.parent.parent / "setups/interpreter.json" return SetupInternal.from_json(json_path) def test_visit_ClassicalDeclaration(): setup_path = Path(__file__).parent.parent.parent / "setups/complex.json" qasm_path = Path(__file__).parent.parent.parent / "qasm/interpreter/phase_freq.qasm" compiler = Compiler(qasm_path, setup_path) qasm_ast = compiler.load_program(qasm_path) ResolveIODeclaration().visit(qasm_ast) SemanticAnalyzer().visit(qasm_ast) DurationTransformer().visit(qasm_ast)
final_call_stack = { "nested_subroutines": {"dummy": 16}, "complex_arrays": { "dummy": 4, "two_d": [[1, 2], [3, 4], [5, 6]], "my_arr": [complex(1, 0), complex(0, 1), complex(0.8, 0.6)], "second": [1, 2, 3, 4], }, } def files() -> list[str]: base_path = Path(__file__).parent.parent.parent / "qasm/visualize_pulse" plen = len(base_path.parts) FILES = list(base_path.glob("**/*.qasm")) return [str(Path(*path.parts[plen:])) for path in FILES] QASM_FILES = files() def common_files() -> list[str]: files = [] cut = -5 for q_file in QASM_FILES: files.append(q_file[:cut]) return files COMMON_FILES = common_files() @pytest.fixture(name="basic_setup") def fixture_basic_setup() -> SetupInternal: json_path = Path(__file__).parent.parent.parent / "setups/interpreter.json" return SetupInternal.from_json(json_path) def test_visit_ClassicalDeclaration(): setup_path = Path(__file__).parent.parent.parent / "setups/complex.json" qasm_path = Path(__file__).parent.parent.parent / "qasm/interpreter/phase_freq.qasm" compiler = Compiler(qasm_path, setup_path) qasm_ast = compiler.load_program(qasm_path) ResolveIODeclaration().visit(qasm_ast) SemanticAnalyzer().visit(qasm_ast) DurationTransformer().visit(qasm_ast)
pv = PulseVisualizer(
9
2023-11-16 17:37:29+00:00
24k
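The files()/common_files() helpers in the test module above are a small pathlib pattern: glob a directory that lives next to the test file and re-express every match relative to it. In isolation (the directory name is a placeholder):

from pathlib import Path

def qasm_files(base: Path) -> list[str]:
    plen = len(base.parts)
    return [str(Path(*p.parts[plen:])) for p in base.glob("**/*.qasm")]

# e.g. qasm_files(Path(__file__).parent / "qasm/visualize_pulse")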
quantuminterface/qiclib
src/qiclib/code/qi_jobs.py
[ { "identifier": "TaskRunner", "path": "src/qiclib/hardware/taskrunner.py", "snippet": "class TaskRunner(PlatformComponent):\n \"\"\"Driver to control the Taskrunner on the Hardware Platform.\"\"\"\n\n def __init__(\n self,\n name: str,\n connection,\n controller,\n ...
import os import json import functools import warnings import numpy as np import qiclib.packages.utility as util from abc import abstractmethod from typing import Dict, List, Callable, Optional, Union, Set, Any, Type from ..hardware.taskrunner import TaskRunner from ..experiment.qicode.data_provider import DataProvider from ..experiment.qicode.data_handler import DataHandler from .qi_seq_instructions import SequencerInstruction from .qi_var_definitions import ( _QiVariableBase, _QiCalcBase, _QiConstValue, QiCellProperty, QiExpression, QiVariableSet, QiCondition, ) from .qi_pulse import QiPulse from .qi_visitor import ( QiCMContainedCellVisitor, QiResultCollector, QiVarInForRange, ) from .qi_prog_builder import QiProgramBuilder from .qi_types import ( QiType, QiPostTypecheckVisitor, QiTypeFallbackVisitor, _TypeDefiningUse, ) from .qi_types import _TypeDefiningUse from .qi_types import _TypeDefiningUse from .qi_types import ( _TypeConstraintReasonQiCommand, _IllegalTypeReason, _add_equal_constraints, ) from .qi_types import ( _TypeConstraintReasonQiCommand, _IllegalTypeReason, _add_equal_constraints, ) from .analysis.qi_insert_mem_parameters import ( insert_recording_offset_store_commands, insert_manipulation_pulse_frequency_store_commands, insert_readout_pulse_frequency_store_commands, ) from .qi_simulate import Simulator from ..experiment.qicode.base import QiCodeExperiment from qiclib.experiment.qicode.base import _TaskrunnerSettings from .qi_visitor import QiStringifyJob
16,846
raise RuntimeError("Can not use command outside QiJob context manager.") _QiJobReference._add_command(cmd) def _set_job_reference(job): """Used for testing purposes""" # pylint: disable=global-statement global _QiJobReference _QiJobReference = job def _delete_job_reference(): """Used for testing purposes""" # pylint: disable=global-statement global _QiJobReference _QiJobReference = None class QiCell: """A QiCell is an abstract representation of the qubit/cell the program is run on. Usually, a single :python:`QiCell` is not instantiated, but instead a :class:`QiCells` object. For a single :python:`QiCell`, use instead :python:`QiCells(1)` A :python:`QiCell` must be instantiated inside within a :class:`QiJob` context. The :python:`QiCell` object can be used to get properties that are defined on :class:`QiSamples <QiSample>`. For this, index the :python:`QiCell` object using the name of the property: .. code-block:: python q: QiCell = ... t1_time = q["t1"] The actual value for the accessed property (in the example above, the T1 time) is filled in when executing a :class:`QiJob` and providing the actual sample. **Tasks of the QiCell**: - Saves the pulses needed for program execution. - Provides a dictionary functionality to define commonly used durations/properties. - Implements a Sequencer object, which contains the assembler program after compilation. :param cellID: A unique ID :raises RuntimeError: When the :python:`QiCell` is instantiated outside a `QiJob` """ def __init__(self, cellID: int): if not isinstance(_QiJobReference, QiJob): raise RuntimeError("QiCell can't be used outside of QiJob.") self.cellID = cellID self.manipulation_pulses: List[QiPulse] = [] self.flux_pulses: List[QiPulse] = [] self.readout_pulses: List[QiPulse] = [] self._result_container: Dict[str, QiResult] = {} # The order in which recorded values are assigned to which result container self._result_recording_order: List[QiResult] = [] self._unresolved_property: Set[QiCellProperty] = set() self._job_ref = _QiJobReference self._relevant_vars: Set[_QiVariableBase] = set() # These attributes are determined by dataflow analyses self._initial_manip_freq: float = None self._initial_readout_freq: float = None self._initial_rec_offset: float = None self._rec_length: Union[int, float, QiCellProperty] = None self._properties: Dict[QiCellProperty, Any] = {} def __getitem__(self, key): if _QiJobReference != self._job_ref: raise RuntimeError( "Tried getting values for cells registered to other QiJob" ) prop = self._properties.get(key, QiCellProperty(self, key)) if isinstance(prop, QiCellProperty): self._unresolved_property.add(key) return prop def __setitem__(self, key, value): if _QiJobReference != self._job_ref: raise RuntimeError( "Tried setting values for cells registered to other QiJob" ) self._properties[key] = value def __call__(self, qic): return qic.cell[self.qic_cell] def get_properties(self): return self._properties.copy() def add_pulse(self, pulse: QiPulse): if pulse not in self.manipulation_pulses: self.manipulation_pulses.append(pulse) if len(self.manipulation_pulses) > 13: raise RuntimeError("Too many pulses in use") return self.manipulation_pulses.index(pulse) + 1 # index 0 and 15 are reserved @property def initial_manipulation_frequency(self): if self._initial_manip_freq is None: if len(self.manipulation_pulses) > 0: warnings.warn( "Manipulation pulses without frequency given, using 90 MHz." 
) return 90e6 # Default frequency freq = self._initial_manip_freq return freq() if isinstance(freq, QiCellProperty) else freq def add_recording_length(self, length): if self._rec_length is None: self._rec_length = length elif ( not self._rec_length._equal_syntax(length)
# Copyright © 2017-2023 Quantum Interface (quantuminterface@ipe.kit.edu) # Richard Gebauer, IPE, Karlsruhe Institute of Technology # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. """ This is the main module of QiCode. Here, all important commands write QiPrograms are defined. """ class QiResult: """Result of an experiment. Can be accessed via :python:`job.cells[cell_index].data("result name")`. Where :python:`cells` denotes a :class:`QiCells` object and :python:`cell_index` an integer. The actual data can be retrieved as a numpy array using the :meth:`get` Method Example ------- .. code-block:: python qic: QiController = ... sample: QiSample = ... with QiJob() as job: q = QiCells(1) Readout(q[0], save_to="result") job.run(qic, sample, averages=1000) data = job.cells[0].data("result") :param name: The name of the variable, by default None """ def __init__(self, name: Optional[str] = None) -> None: self._cell = None self.data = None self.recording_count = 0 self.name: str = "" if name is None else name def get(self) -> np.ndarray: """gets the data of the result as a numpy array :return: The data of the experiment """ return np.array(self.data) def __str__(self) -> str: return f'QiResult("{self.name}")' class QiCommand: """Base class of every Job command. Provides _relevant_cells, containing every cell used for the execution of the command. Provides _associated_variable_set, containing every variable needed for the execution of the command. """ def __init__(self) -> None: self._associated_variable_set = QiVariableSet() self._relevant_cells: Set[QiCell] = set() @abstractmethod def accept(self, visitor, *input): raise RuntimeError( f"{self.__class__} doesn't implement `accept`. This is a bug." ) def is_variable_relevant(self, variable: _QiVariableBase) -> bool: return variable in self._associated_variable_set def add_associated_variable(self, x): if isinstance(x, _QiVariableBase): self._associated_variable_set.add(x) def __str__(self) -> str: return "cQiCommand" def _stringify(self) -> str: raise NotImplementedError(f"_stringify not implemented for {repr(self)}") _QiJobReference = None def _add_cmd_to_job(cmd: QiCommand): if _QiJobReference is None: raise RuntimeError("Can not use command outside QiJob context manager.") _QiJobReference._add_command(cmd) def _set_job_reference(job): """Used for testing purposes""" # pylint: disable=global-statement global _QiJobReference _QiJobReference = job def _delete_job_reference(): """Used for testing purposes""" # pylint: disable=global-statement global _QiJobReference _QiJobReference = None class QiCell: """A QiCell is an abstract representation of the qubit/cell the program is run on. Usually, a single :python:`QiCell` is not instantiated, but instead a :class:`QiCells` object. For a single :python:`QiCell`, use instead :python:`QiCells(1)` A :python:`QiCell` must be instantiated inside within a :class:`QiJob` context. 
The :python:`QiCell` object can be used to get properties that are defined on :class:`QiSamples <QiSample>`. For this, index the :python:`QiCell` object using the name of the property: .. code-block:: python q: QiCell = ... t1_time = q["t1"] The actual value for the accessed property (in the example above, the T1 time) is filled in when executing a :class:`QiJob` and providing the actual sample. **Tasks of the QiCell**: - Saves the pulses needed for program execution. - Provides a dictionary functionality to define commonly used durations/properties. - Implements a Sequencer object, which contains the assembler program after compilation. :param cellID: A unique ID :raises RuntimeError: When the :python:`QiCell` is instantiated outside a `QiJob` """ def __init__(self, cellID: int): if not isinstance(_QiJobReference, QiJob): raise RuntimeError("QiCell can't be used outside of QiJob.") self.cellID = cellID self.manipulation_pulses: List[QiPulse] = [] self.flux_pulses: List[QiPulse] = [] self.readout_pulses: List[QiPulse] = [] self._result_container: Dict[str, QiResult] = {} # The order in which recorded values are assigned to which result container self._result_recording_order: List[QiResult] = [] self._unresolved_property: Set[QiCellProperty] = set() self._job_ref = _QiJobReference self._relevant_vars: Set[_QiVariableBase] = set() # These attributes are determined by dataflow analyses self._initial_manip_freq: float = None self._initial_readout_freq: float = None self._initial_rec_offset: float = None self._rec_length: Union[int, float, QiCellProperty] = None self._properties: Dict[QiCellProperty, Any] = {} def __getitem__(self, key): if _QiJobReference != self._job_ref: raise RuntimeError( "Tried getting values for cells registered to other QiJob" ) prop = self._properties.get(key, QiCellProperty(self, key)) if isinstance(prop, QiCellProperty): self._unresolved_property.add(key) return prop def __setitem__(self, key, value): if _QiJobReference != self._job_ref: raise RuntimeError( "Tried setting values for cells registered to other QiJob" ) self._properties[key] = value def __call__(self, qic): return qic.cell[self.qic_cell] def get_properties(self): return self._properties.copy() def add_pulse(self, pulse: QiPulse): if pulse not in self.manipulation_pulses: self.manipulation_pulses.append(pulse) if len(self.manipulation_pulses) > 13: raise RuntimeError("Too many pulses in use") return self.manipulation_pulses.index(pulse) + 1 # index 0 and 15 are reserved @property def initial_manipulation_frequency(self): if self._initial_manip_freq is None: if len(self.manipulation_pulses) > 0: warnings.warn( "Manipulation pulses without frequency given, using 90 MHz." ) return 90e6 # Default frequency freq = self._initial_manip_freq return freq() if isinstance(freq, QiCellProperty) else freq def add_recording_length(self, length): if self._rec_length is None: self._rec_length = length elif ( not self._rec_length._equal_syntax(length)
if isinstance(self._rec_length, QiExpression)
8
2023-11-10 10:26:10+00:00
24k
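The qi_jobs.py record above registers commands through a module-level _QiJobReference that a context manager sets on entry and clears on exit. A stripped-down sketch of that pattern in isolation (not the qiclib implementation; Job and add_command are illustrative stand-ins):

_job_ref = None  # module-level reference, set only inside a Job context

class Job:
    def __init__(self):
        self.commands = []

    def __enter__(self):
        global _job_ref
        if _job_ref is not None:
            raise RuntimeError("Job contexts must not be nested.")
        _job_ref = self
        return self

    def __exit__(self, *exc):
        global _job_ref
        _job_ref = None

def add_command(cmd):
    # Mirrors _add_cmd_to_job: refuse to run outside the context manager.
    if _job_ref is None:
        raise RuntimeError("Cannot use command outside Job context manager.")
    _job_ref.commands.append(cmd)

with Job() as job:
    add_command("play_pulse")
print(job.commands)  # ['play_pulse']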
jpcadena/fastapi-boilerplate
app/api/api_v1/router/auth.py
[ { "identifier": "get_redis_dep", "path": "app/api/deps.py", "snippet": "async def get_redis_dep(\n redis_dependency: Annotated[RedisDependency, Depends()]\n) -> AsyncGenerator[Redis, None]: # type: ignore\n \"\"\"\n Lazy generation of Redis dependency\n :param redis_dependency: The dependen...
import logging from typing import Annotated, Any, Optional from fastapi import ( APIRouter, Body, Depends, Header, HTTPException, Path, Request, status, ) from fastapi.security import OAuth2PasswordRequestForm from pydantic import EmailStr from redis.asyncio import Redis from starlette.datastructures import Address from app.api.deps import get_redis_dep from app.api.oauth2_validation import get_current_user, get_refresh_current_user from app.config.config import ( get_auth_settings, get_init_settings, get_settings, init_setting, ) from app.config.db.auth_settings import AuthSettings from app.config.init_settings import InitSettings from app.config.settings import Settings from app.core.security.password import verify_password from app.exceptions.exceptions import NotFoundException, ServiceException from app.models.sql.user import User as UserDB from app.schemas.external.msg import Msg from app.schemas.external.token import TokenResetPassword, TokenResponse from app.schemas.external.user import ( UserResponse, UserUpdate, UserUpdateResponse, ) from app.schemas.infrastructure.user import UserAuth from app.services.infrastructure.auth import common_auth_procedure from app.services.infrastructure.token import TokenService from app.services.infrastructure.user import UserService, get_user_service from app.tasks.email_tasks.email_tasks import ( send_password_changed_confirmation_email, send_reset_password_email, ) from app.utils.security.password import ( generate_password_reset_token, verify_password_reset_token, )
14,722
return current_user @router.post("/recover-password/{email}", response_model=Msg) async def recover_password( settings: Annotated[Settings, Depends(get_settings)], auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)], email: Annotated[ EmailStr, Path( ..., title="Email", description="The email used to recover the password", example={"email": "someone@example.com"}, openapi_examples=init_setting.EMAIL_BODY_EXAMPLES, ), ], user_service: Annotated[UserService, Depends(get_user_service)], init_settings: Annotated[InitSettings, Depends(get_init_settings)], ) -> Msg: """ Endpoint to handle password recovery. ## Parameter: - `email:` **Path parameter that references the email used to recover the password** - `type:` **EmailStr** ## Response: - `return:` **Message object** - `rtype:` **Msg** \f :param user_service: Dependency method for User service object :type user_service: UserService :param settings: Dependency method for cached setting object :type settings: config.Settings :param auth_settings: Dependency method for cached setting object :type auth_settings: AuthSettings :param init_settings: Dependency method for cached init setting object :type init_settings: InitSettings """ try: user: Optional[UserResponse] = await user_service.get_user_by_email( email ) except ServiceException as exc: logger.error(exc) user = None if user: password_reset_token: str = generate_password_reset_token( email, auth_settings ) await send_reset_password_email( user.email, user.username, password_reset_token, settings, init_settings, auth_settings, ) return Msg(msg="If the email is registered, a reset link will be sent.") @router.post("/reset-password", response_model=Msg) async def reset_password( settings: Annotated[Settings, Depends(get_settings)], auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)], user_service: Annotated[UserService, Depends(get_user_service)], token_reset_password: Annotated[ TokenResetPassword, Body( ..., title="Body object", description="Object with access token and new password", openapi_examples=init_setting.TOKEN_PAYLOAD_EXAMPLES, ), ], init_settings: Annotated[InitSettings, Depends(get_init_settings)], ) -> Msg: """ Endpoint to handle password reset. ## Parameter: - `token_reset_password:` **Body Object with token and new password** - `type:` **TokenResetPassword** ## Response: - `return:` **Message object** - `rtype:` **Msg** \f :param settings: Dependency method for cached setting object :type settings: config.Settings :param user_service: Dependency method for User service object :type user_service: UserService :param auth_settings: Dependency method for cached setting object :type auth_settings: AuthSettings :param init_settings: Dependency method for cached init setting object :type init_settings: InitSettings """ email: Optional[EmailStr] = verify_password_reset_token( token_reset_password.token, auth_settings ) if not email: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid or expired token", ) try: found_user: Optional[ UserResponse ] = await user_service.get_user_by_email(email) except ServiceException as exc: logger.error(exc) raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail="There was an issue with the request", ) from exc if not found_user: raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail="User not found" ) user_data: dict[str, Any] = found_user.model_dump() user_data["password"] = token_reset_password.password user_update: UserUpdate = UserUpdate(**user_data)
""" Authentication API Router. This module provides login and password recovery functionality. """ logger: logging.Logger = logging.getLogger(__name__) router: APIRouter = APIRouter(prefix="/auth", tags=["auth"]) @router.post("/login", response_model=TokenResponse) async def login( request: Request, auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)], user: Annotated[OAuth2PasswordRequestForm, Depends()], user_service: Annotated[UserService, Depends(get_user_service)], redis: Annotated[Redis, Depends(get_redis_dep)], # type: ignore ) -> TokenResponse: """ Endpoint to handle user login with OAuth2 authentication using request form. ## Parameter: - `user:` **Request body with username and password** - `type:` **OAuth2PasswordRequestForm** ## Response: - `return:` **Token information with access token, its type and refresh token** - `rtype:` **TokenResponse** \f :param request: Request object for client host information :type request: Request :param user_service: Dependency method for User Service :type user_service: UserService :param auth_settings: Dependency method for cached setting object :type auth_settings: AuthSettings :param redis: Dependency method for async Redis connection :type redis: Redis """ client: Optional[Address] = request.client if not client: raise NotFoundException(auth_settings.NO_CLIENT_FOUND) client_ip: str = client.host try: found_user: UserDB = await user_service.get_login_user(user.username) except ServiceException as exc: logger.error(exc) raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail="Invalid credentials" ) from exc if not verify_password(found_user.password, user.password): detail: str = "Incorrect password" logger.warning(detail) raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail=detail ) if not found_user.is_active: user_detail: str = "Inactive user" logger.warning(user_detail) raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail=user_detail ) return await common_auth_procedure( found_user, client_ip, redis, auth_settings ) @router.post( "/refresh", response_model=TokenResponse, status_code=status.HTTP_201_CREATED, ) async def refresh_token( request: Request, user_service: Annotated[UserService, Depends(get_user_service)], auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)], refresh_current_user: Annotated[ UserAuth, Depends(get_refresh_current_user) ], redis: Annotated[Redis, Depends(get_redis_dep)], # type: ignore ) -> TokenResponse: """ Generates a refresh token for the current user and saves it to the database ## Response: - `return:` **Token information with access token, its type and refresh token** - `rtype:` **TokenResponse** \f :param request: The HTTP request on the server :type request: Request :param user_service: Dependency method for User Service :type user_service: UserService :param auth_settings: Dependency method for cached setting object :type auth_settings: AuthSettings :param refresh_current_user: The current user dependency for refresh token :type refresh_current_user: UserAuth :param redis: Dependency method for async Redis connection :type redis: Redis """ client: Optional[Address] if not (client := request.client): raise NotFoundException(auth_settings.NO_CLIENT_FOUND) client_ip: str = client.host try: user: UserDB = await user_service.get_login_user( refresh_current_user.username ) except ServiceException as exc: detail: str = "Can not found user information." 
logger.error(detail) raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail=detail ) from exc return await common_auth_procedure(user, client_ip, redis, auth_settings) @router.post("/validate-token", response_model=UserAuth) async def validate_token( current_user: Annotated[UserAuth, Depends(get_current_user)] ) -> UserAuth: """ Endpoint to validate an access token. ## Response: - `return:` **The authenticated user instance** - `rtype:` **UserAuth** \f :param current_user: The current user :type current_user: UserAuth """ return current_user @router.post("/recover-password/{email}", response_model=Msg) async def recover_password( settings: Annotated[Settings, Depends(get_settings)], auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)], email: Annotated[ EmailStr, Path( ..., title="Email", description="The email used to recover the password", example={"email": "someone@example.com"}, openapi_examples=init_setting.EMAIL_BODY_EXAMPLES, ), ], user_service: Annotated[UserService, Depends(get_user_service)], init_settings: Annotated[InitSettings, Depends(get_init_settings)], ) -> Msg: """ Endpoint to handle password recovery. ## Parameter: - `email:` **Path parameter that references the email used to recover the password** - `type:` **EmailStr** ## Response: - `return:` **Message object** - `rtype:` **Msg** \f :param user_service: Dependency method for User service object :type user_service: UserService :param settings: Dependency method for cached setting object :type settings: config.Settings :param auth_settings: Dependency method for cached setting object :type auth_settings: AuthSettings :param init_settings: Dependency method for cached init setting object :type init_settings: InitSettings """ try: user: Optional[UserResponse] = await user_service.get_user_by_email( email ) except ServiceException as exc: logger.error(exc) user = None if user: password_reset_token: str = generate_password_reset_token( email, auth_settings ) await send_reset_password_email( user.email, user.username, password_reset_token, settings, init_settings, auth_settings, ) return Msg(msg="If the email is registered, a reset link will be sent.") @router.post("/reset-password", response_model=Msg) async def reset_password( settings: Annotated[Settings, Depends(get_settings)], auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)], user_service: Annotated[UserService, Depends(get_user_service)], token_reset_password: Annotated[ TokenResetPassword, Body( ..., title="Body object", description="Object with access token and new password", openapi_examples=init_setting.TOKEN_PAYLOAD_EXAMPLES, ), ], init_settings: Annotated[InitSettings, Depends(get_init_settings)], ) -> Msg: """ Endpoint to handle password reset. 
## Parameter: - `token_reset_password:` **Body Object with token and new password** - `type:` **TokenResetPassword** ## Response: - `return:` **Message object** - `rtype:` **Msg** \f :param settings: Dependency method for cached setting object :type settings: config.Settings :param user_service: Dependency method for User service object :type user_service: UserService :param auth_settings: Dependency method for cached setting object :type auth_settings: AuthSettings :param init_settings: Dependency method for cached init setting object :type init_settings: InitSettings """ email: Optional[EmailStr] = verify_password_reset_token( token_reset_password.token, auth_settings ) if not email: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid or expired token", ) try: found_user: Optional[ UserResponse ] = await user_service.get_user_by_email(email) except ServiceException as exc: logger.error(exc) raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail="There was an issue with the request", ) from exc if not found_user: raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail="User not found" ) user_data: dict[str, Any] = found_user.model_dump() user_data["password"] = token_reset_password.password user_update: UserUpdate = UserUpdate(**user_data)
user: UserUpdateResponse = await user_service.update_user( # type: ignore
16
2023-11-17 00:32:32+00:00
24k
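The auth router record above leans on FastAPI's Annotated dependency style for every endpoint. A minimal runnable sketch of that injection pattern (Settings and get_settings here are stand-ins, not the boilerplate's real objects):

from typing import Annotated

from fastapi import Depends, FastAPI

class Settings:
    app_name = "demo"

def get_settings() -> Settings:
    # In the boilerplate this would be cached; a fresh object suffices here.
    return Settings()

app = FastAPI()

@app.get("/info")
async def info(settings: Annotated[Settings, Depends(get_settings)]) -> dict:
    # FastAPI resolves get_settings() and injects the result per request.
    return {"app_name": settings.app_name}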
fg320/DEASC
examples/12C_5x1_farm_dyn_tuning_wso_grouping_looping.py
[ { "identifier": "WfModel", "path": "deasc/wf_model.py", "snippet": "class WfModel:\n \"\"\"\n Class for wind farm modelling (Interface setup but not limited to FLORIS\n framework).\n \"\"\"\n\n def __init__(self, input_file, path):\n \"\"\"\n Initialise wind farm object by p...
import numpy as np from deasc import WfModel from deasc import WSOpt from deasc import Tuning from deasc import GPWrap from deasc import TuningDyn_Grouping from deasc import TuningDyn_Looping_Turbine from deasc.utils_floris import ( floris_extract_object_dict, floris_extract_parameter, floris_param_change_object_dict, floris_param_change_object )
15,511
""" This example shows wake steering optimisation on a 5x1 wind farm of NREL 5 MW turbines. Dynamic parameter tuning with the looping approach is implemented to refine the results achieved with grouping. Tuning is introduced in the optimisation for the wake expansion parameter k of the Jensen wake model. The tuning variables are the yaw angles of all wind turbines in the farm, excluding the most downstream one. """ # %% Initial wake steering optimisation - Grouping approach for dynamic parameter tuning # Initialise and set layout for wind farm model path = "./inputs/" input_file = "jensen.yaml" wf_model = WfModel(input_file, path) wf_model.set_aligned_layout(5, 1, 7, 5) # Set kd deflection parameter wf_model_dict = floris_extract_object_dict(wf_model) wf_model_dict = floris_param_change_object_dict(wf_model_dict, 'wake_deflection_parameters', 'kd', 0.3) wf_model = floris_param_change_object(wf_model, wf_model_dict) # Specify atmopheric conditions ws = 8.0 wd = 270 ti = 0.05 shear = 0.0 # Wake steering optimisation inputs yaw_initial = np.full(shape=(5), fill_value=0) inflow = (yaw_initial, wd, ws, ti, shear) variables = [1, 2, 3, 4] var_bounds = (-25, 25) var_initial = np.full(shape=(len(variables)), fill_value=0) # Dynamic tuning object # Parameter info parameter_class = 'wake_velocity_parameters' parameter_name = 'we' # Import optimal parameter dataset and extract GP input dataset_path = "./optimal_parameter_datasets/" dataset_import = np.load(dataset_path+'we_5x1_2dim_grouping.npy', allow_pickle=True) optimal_parameter_dataset = dataset_import.item() yaw_data = [] param_data = [] for key in optimal_parameter_dataset.keys(): yaw_data.append([key[0], key[2]]) # Extract group yaw param_data.append([optimal_parameter_dataset[key]]) # Construct Gaussian Process (GP) GP_obj = GPWrap(parameter_class=parameter_class, parameter_name=parameter_name, dimensions=2) GP_model = GP_obj.GP_so(yaw_data, param_data, num_restarts=100, noise=0.05) # Tuning object initialisation
""" This example shows wake steering optimisation on a 5x1 wind farm of NREL 5 MW turbines. Dynamic parameter tuning with the looping approach is implemented to refine the results achieved with grouping. Tuning is introduced in the optimisation for the wake expansion parameter k of the Jensen wake model. The tuning variables are the yaw angles of all wind turbines in the farm, excluding the most downstream one. """ # %% Initial wake steering optimisation - Grouping approach for dynamic parameter tuning # Initialise and set layout for wind farm model path = "./inputs/" input_file = "jensen.yaml" wf_model = WfModel(input_file, path) wf_model.set_aligned_layout(5, 1, 7, 5) # Set kd deflection parameter wf_model_dict = floris_extract_object_dict(wf_model) wf_model_dict = floris_param_change_object_dict(wf_model_dict, 'wake_deflection_parameters', 'kd', 0.3) wf_model = floris_param_change_object(wf_model, wf_model_dict) # Specify atmopheric conditions ws = 8.0 wd = 270 ti = 0.05 shear = 0.0 # Wake steering optimisation inputs yaw_initial = np.full(shape=(5), fill_value=0) inflow = (yaw_initial, wd, ws, ti, shear) variables = [1, 2, 3, 4] var_bounds = (-25, 25) var_initial = np.full(shape=(len(variables)), fill_value=0) # Dynamic tuning object # Parameter info parameter_class = 'wake_velocity_parameters' parameter_name = 'we' # Import optimal parameter dataset and extract GP input dataset_path = "./optimal_parameter_datasets/" dataset_import = np.load(dataset_path+'we_5x1_2dim_grouping.npy', allow_pickle=True) optimal_parameter_dataset = dataset_import.item() yaw_data = [] param_data = [] for key in optimal_parameter_dataset.keys(): yaw_data.append([key[0], key[2]]) # Extract group yaw param_data.append([optimal_parameter_dataset[key]]) # Construct Gaussian Process (GP) GP_obj = GPWrap(parameter_class=parameter_class, parameter_name=parameter_name, dimensions=2) GP_model = GP_obj.GP_so(yaw_data, param_data, num_restarts=100, noise=0.05) # Tuning object initialisation
tuning_dyn_obj = TuningDyn_Grouping(param_class=parameter_class,
4
2023-11-10 18:13:27+00:00
24k
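The DEASC record above rebuilds GP training data from a dict keyed by yaw tuples. A small sketch of that reshaping step with made-up numbers (the real dataset comes from we_5x1_2dim_grouping.npy; the tuples and values below are hypothetical):

import numpy as np

# Hypothetical stand-in for dataset_import.item(): yaw tuple -> optimal k
optimal_parameter_dataset = {
    (0.0, 5.0, 10.0, 15.0): 0.05,
    (5.0, 5.0, 15.0, 20.0): 0.06,
}

yaw_data, param_data = [], []
for key, value in optimal_parameter_dataset.items():
    yaw_data.append([key[0], key[2]])  # keep the two group yaws, as above
    param_data.append([value])

X = np.asarray(yaw_data)    # GP inputs, shape (n, 2)
y = np.asarray(param_data)  # GP targets, shape (n, 1)
print(X.shape, y.shape)     # (2, 2) (2, 1)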
PlaxtonFlarion/NexaFlow
nexaflow/skills/alynex.py
[ { "identifier": "toolbox", "path": "nexaflow/toolbox.py", "snippet": "def video_capture(video_path: str):\ndef video_jump(video_cap: cv2.VideoCapture, frame_id: int):\ndef compare_ssim(pic1: np.ndarray, pic2: np.ndarray) -> float:\ndef multi_compare_ssim(\n pic1_list: typing.List, pic2_list: typing.L...
import os import cv2 import time import random import asyncio from loguru import logger from typing import List, Union, Optional from concurrent.futures import ThreadPoolExecutor from nexaflow import toolbox from nexaflow.skills.report import Report from nexaflow.skills.record import Record from nexaflow.skills.player import Player from nexaflow.skills.switch import Switch from nexaflow.cutter.cutter import VideoCutter from nexaflow.video import VideoObject, Frame from nexaflow.classifier.keras_classifier import KerasClassifier from nexaflow.hook import BaseHook, CropHook, OmitHook, FrameSaveHook from nexaflow.classifier.base import ClassifierResult, SingleClassifierResult
16,146
class Alynex(object): target_size: tuple = (350, 700) fps: int = 60 step: int = 1 block: int = 6 threshold: Union[int, float] = 0.97 offset: int = 3 compress_rate: float = 0.5 window_size: int = 1 window_coefficient: int = 2 kc: KerasClassifier = KerasClassifier( target_size=target_size, data_size=target_size ) def __init__(self): self.__report: Optional[Report] = None self.__record: Optional[Record] = Record()
class Alynex(object): target_size: tuple = (350, 700) fps: int = 60 step: int = 1 block: int = 6 threshold: Union[int, float] = 0.97 offset: int = 3 compress_rate: float = 0.5 window_size: int = 1 window_coefficient: int = 2 kc: KerasClassifier = KerasClassifier( target_size=target_size, data_size=target_size ) def __init__(self): self.__report: Optional[Report] = None self.__record: Optional[Record] = Record()
self.__player: Optional[Player] = Player()
3
2023-11-13 05:27:34+00:00
24k
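The Alynex record above keeps its tuning knobs as class attributes; note the threshold annotation is spelled Union[int, float] after the fix (either Union[...] with a comma, or the plain int | float form on Python 3.10+, is valid — mixing the two as Union[int | float] fails on older interpreters). A tiny sketch of the same class-level-defaults pattern with illustrative names:

from typing import Optional, Union

class Analyzer:
    # Shared defaults live on the class; per-run state lives on the instance.
    target_size: tuple = (350, 700)
    fps: int = 60
    threshold: Union[int, float] = 0.97

    def __init__(self) -> None:
        self.report: Optional[object] = None  # filled in lazily per run

a = Analyzer()
print(a.fps, a.threshold)  # 60 0.97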
deepseek-ai/DreamCraft3D
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Flo...
import os import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import threestudio import trimesh from dataclasses import dataclass, field from threestudio.models.geometry.base import ( BaseExplicitGeometry, BaseGeometry, contract_to_unisphere, ) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.misc import broadcast from threestudio.utils.ops import scale_tensor from threestudio.utils.typing import * from pysdf import SDF
16,118
mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative sign here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance elif isinstance(other, ImplicitVolume): instance = TetrahedraSDFGrid(cfg, **kwargs) if other.cfg.isosurface_method != "mt": other.cfg.isosurface_method = "mt" threestudio.warn( f"Override isosurface_method of the source geometry to 'mt'" ) if other.cfg.isosurface_resolution != instance.cfg.isosurface_resolution: other.cfg.isosurface_resolution = instance.cfg.isosurface_resolution threestudio.warn( f"Override isosurface_resolution of the source geometry to {instance.cfg.isosurface_resolution}" ) mesh = other.isosurface() instance.isosurface_bbox = mesh.extras["bbox"] instance.sdf.data = ( mesh.extras["grid_level"].to(instance.sdf.data).clamp(-1, 1) ) if not instance.cfg.geometry_only and copy_net: instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None shape_init_mesh_up: str = "+z" shape_init_mesh_front: str = "+x" force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: if self.cfg.shape_init is None and not self.cfg.force_shape_init: return # do not initialize shape if weights are provided if self.cfg.weights is not None and not self.cfg.force_shape_init: return get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]] assert isinstance(self.cfg.shape_init, str) if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative sign here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance elif isinstance(other, ImplicitVolume): instance = TetrahedraSDFGrid(cfg, **kwargs) if other.cfg.isosurface_method != "mt": other.cfg.isosurface_method = "mt" threestudio.warn( f"Override isosurface_method of the source geometry to 'mt'" ) if other.cfg.isosurface_resolution != instance.cfg.isosurface_resolution: other.cfg.isosurface_resolution = instance.cfg.isosurface_resolution threestudio.warn( f"Override isosurface_resolution of the source geometry to {instance.cfg.isosurface_resolution}" ) mesh = other.isosurface() instance.isosurface_bbox = mesh.extras["bbox"] instance.sdf.data = ( mesh.extras["grid_level"].to(instance.sdf.data).clamp(-1, 1) ) if not instance.cfg.geometry_only and copy_net: instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
elif isinstance(other, ImplicitSDF):
3
2023-10-23 07:40:20+00:00
24k
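initialize_shape() in the record above seeds the tetrahedra grid with an analytic signed distance (ellipsoid, sphere, or a mesh via pysdf). A self-contained sketch of the sphere case evaluated on stand-in grid vertices (requires torch; the random grid below is a placeholder for the real tet-grid vertices):

import torch

def sphere_sdf(points: torch.Tensor, radius: float) -> torch.Tensor:
    # Negative inside the sphere, zero on the surface, positive outside.
    return torch.linalg.norm(points, dim=-1, keepdim=True) - radius

grid = torch.rand(1024, 3) * 2.0 - 1.0  # stand-in for grid vertices in [-1, 1]
sdf_init = sphere_sdf(grid, radius=0.5)
print(sdf_init.shape)  # torch.Size([1024, 1])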
microsoft/SoM
task_adapter/semantic_sam/tasks/inference_semsam_m2m_auto.py
[ { "identifier": "Visualizer", "path": "task_adapter/utils/visualizer.py", "snippet": "class Visualizer:\n \"\"\"\n Visualizer that draws data about detection/segmentation on images.\n\n It contains methods like `draw_{text,box,circle,line,binary_mask,polygon}`\n that draw primitive objects t...
import torch import numpy as np import matplotlib.pyplot as plt import cv2 import io import cv2 # type: ignore from torchvision import transforms from task_adapter.utils.visualizer import Visualizer from typing import Tuple from PIL import Image from detectron2.data import MetadataCatalog from .automatic_mask_generator import SemanticSamAutomaticMaskGenerator from task_adapter.utils.visualizer import Visualizer
15,527
# -------------------------------------------------------- # Semantic-SAM: Segment and Recognize Anything at Any Granularity # Copyright (c) 2023 Microsoft # Licensed under The MIT License [see LICENSE for details] # Written by Hao Zhang (hzhangcx@connect.ust.hk) # -------------------------------------------------------- metadata = MetadataCatalog.get('coco_2017_train_panoptic') def inference_semsam_m2m_auto(model, image, level, all_classes, all_parts, thresh, text_size, hole_scale, island_scale, semantic, refimg=None, reftxt=None, audio_pth=None, video_pth=None, label_mode='1', alpha=0.1, anno_mode=['Mask']): t = [] t.append(transforms.Resize(int(text_size), interpolation=Image.BICUBIC)) transform1 = transforms.Compose(t) image_ori = transform1(image) image_ori = np.asarray(image_ori) images = torch.from_numpy(image_ori.copy()).permute(2,0,1).cuda()
# -------------------------------------------------------- # Semantic-SAM: Segment and Recognize Anything at Any Granularity # Copyright (c) 2023 Microsoft # Licensed under The MIT License [see LICENSE for details] # Written by Hao Zhang (hzhangcx@connect.ust.hk) # -------------------------------------------------------- metadata = MetadataCatalog.get('coco_2017_train_panoptic') def inference_semsam_m2m_auto(model, image, level, all_classes, all_parts, thresh, text_size, hole_scale, island_scale, semantic, refimg=None, reftxt=None, audio_pth=None, video_pth=None, label_mode='1', alpha=0.1, anno_mode=['Mask']): t = [] t.append(transforms.Resize(int(text_size), interpolation=Image.BICUBIC)) transform1 = transforms.Compose(t) image_ori = transform1(image) image_ori = np.asarray(image_ori) images = torch.from_numpy(image_ori.copy()).permute(2,0,1).cuda()
mask_generator = SemanticSamAutomaticMaskGenerator(model,points_per_side=32,
1
2023-10-16 03:39:26+00:00
24k
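inference_semsam_m2m_auto in the record above resizes with torchvision and converts an HWC uint8 array to a CHW CUDA tensor. A CPU-safe sketch of that preprocessing (InterpolationMode is the current torchvision spelling; the snippet above passes the older Image.BICUBIC constant, and the blank image here is a stand-in):

import numpy as np
import torch
from PIL import Image
from torchvision import transforms

transform = transforms.Compose([
    transforms.Resize(640, interpolation=transforms.InterpolationMode.BICUBIC),
])

image = Image.new("RGB", (800, 600))      # stand-in for a loaded image
image_np = np.asarray(transform(image))   # HWC uint8 after resize
device = "cuda" if torch.cuda.is_available() else "cpu"
images = torch.from_numpy(image_np.copy()).permute(2, 0, 1).to(device)
print(images.shape)  # channels-first: torch.Size([3, 640, 853])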
hkchengrex/Cutie
gui/main_controller.py
[ { "identifier": "CUTIE", "path": "cutie/model/cutie.py", "snippet": "class CUTIE(nn.Module):\n def __init__(self, cfg: DictConfig, *, single_object=False):\n super().__init__()\n model_cfg = cfg.model\n self.ms_dims = model_cfg.pixel_encoder.ms_dims\n self.key_dim = model_...
import os import logging import cv2 import torch import numpy as np from os import path from typing import Literal from torch import mps from torch import autocast from torchvision.transforms.functional import to_tensor from omegaconf import DictConfig, open_dict from cutie.model.cutie import CUTIE from cutie.inference.inference_core import InferenceCore from gui.interaction import * from gui.interactive_utils import * from gui.resource_manager import ResourceManager from gui.gui import GUI from gui.click_controller import ClickController from gui.reader import PropagationReader, get_data_loader from gui.exporter import convert_frames_to_video, convert_mask_to_binary from scripts.download_models import download_models_if_needed
17,325
if loaded_mask is None: self.curr_mask.fill(0) else: self.curr_mask = loaded_mask.copy() self.curr_prob = None def convert_current_image_mask_torch(self, no_mask: bool = False): if self.curr_image_torch is None: self.curr_image_torch = to_tensor(self.curr_image_np).to(self.device, non_blocking=True) if self.curr_prob is None and not no_mask: self.curr_prob = index_numpy_to_one_hot_torch(self.curr_mask, self.num_objects + 1).to( self.device, non_blocking=True) def compose_current_im(self): self.vis_image = get_visualization(self.vis_mode, self.curr_image_np, self.curr_mask, self.overlay_layer, self.vis_target_objects) def update_canvas(self): self.gui.set_canvas(self.vis_image) def update_current_image_fast(self): # fast path, uses gpu. Changes the image in-place to avoid copying # thus current_image_torch must be voided afterwards self.vis_image = get_visualization_torch(self.vis_mode, self.curr_image_torch, self.curr_prob, self.overlay_layer_torch, self.vis_target_objects) self.curr_image_torch = None self.vis_image = np.ascontiguousarray(self.vis_image) if self.save_visualization: self.res_man.save_visualization(self.curr_ti, self.vis_mode, self.vis_image) if self.save_soft_mask: self.res_man.save_soft_mask(self.curr_ti, self.curr_prob.cpu().numpy()) self.gui.set_canvas(self.vis_image) def show_current_frame(self, fast: bool = False): # Re-compute overlay and show the image if fast: self.update_current_image_fast() else: self.compose_current_im() if self.save_visualization: self.res_man.save_visualization(self.curr_ti, self.vis_mode, self.vis_image) self.update_canvas() self.gui.update_slider(self.curr_ti) self.gui.frame_name.setText(self.res_man.names[self.curr_ti] + '.jpg') def set_vis_mode(self): self.vis_mode = self.gui.combo.currentText() self.show_current_frame() def save_current_mask(self): # save mask to hard disk self.res_man.save_mask(self.curr_ti, self.curr_mask) def on_slider_update(self): # if we are propagating, the on_run function will take care of everything # don't do duplicate work here self.curr_ti = self.gui.tl_slider.value() if not self.propagating: # with self.vis_cond: # self.vis_cond.notify() if self.curr_frame_dirty: self.save_current_mask() self.curr_frame_dirty = False self.reset_this_interaction() self.curr_ti = self.gui.tl_slider.value() self.load_current_image_mask() self.show_current_frame() def on_forward_propagation(self): if self.propagating: # acts as a pause button self.propagating = False self.propagate_direction = 'none' else: self.propagate_fn = self.on_next_frame self.gui.forward_propagation_start() self.propagate_direction = 'forward' self.on_propagate() def on_backward_propagation(self): if self.propagating: # acts as a pause button self.propagating = False self.propagate_direction = 'none' else: self.propagate_fn = self.on_prev_frame self.gui.backward_propagation_start() self.propagate_direction = 'backward' self.on_propagate() def on_pause(self): self.propagating = False self.gui.text(f'Propagation stopped at t={self.curr_ti}.') self.gui.pause_propagation() def on_propagate(self): # start to propagate with autocast(self.device, enabled=(self.amp and self.device == 'cuda')): self.convert_current_image_mask_torch() self.gui.text(f'Propagation started at t={self.curr_ti}.') self.processor.clear_sensory_memory() self.curr_prob = self.processor.step(self.curr_image_torch, self.curr_prob[1:], idx_mask=False) self.curr_mask = torch_prob_to_numpy_mask(self.curr_prob) # clear self.interacted_prob = None self.reset_this_interaction() 
self.show_current_frame(fast=True) self.propagating = True self.gui.clear_all_mem_button.setEnabled(False) self.gui.clear_non_perm_mem_button.setEnabled(False) self.gui.tl_slider.setEnabled(False)
# fix conflicts between qt5 and cv2 os.environ.pop("QT_QPA_PLATFORM_PLUGIN_PATH") try: except: print('torch.MPS not available.') log = logging.getLogger() class MainController(): def __init__(self, cfg: DictConfig) -> None: super().__init__() self.initialized = False # setting up the workspace if cfg["workspace"] is None: if cfg["images"] is not None: basename = path.basename(cfg["images"]) elif cfg["video"] is not None: basename = path.basename(cfg["video"])[:-4] else: raise NotImplementedError('Either images, video, or workspace has to be specified') cfg["workspace"] = path.join(cfg['workspace_root'], basename) # reading arguments self.cfg = cfg self.num_objects = cfg['num_objects'] self.device = cfg['device'] self.amp = cfg['amp'] # initializing the network(s) self.initialize_networks() # main components self.res_man = ResourceManager(cfg) self.processor = InferenceCore(self.cutie, self.cfg) self.gui = GUI(self, self.cfg) # initialize control info self.length: int = self.res_man.length self.interaction: Interaction = None self.interaction_type: str = 'Click' self.curr_ti: int = 0 self.curr_object: int = 1 self.propagating: bool = False self.propagate_direction: Literal['forward', 'backward', 'none'] = 'none' self.last_ex = self.last_ey = 0 # current frame info self.curr_frame_dirty: bool = False self.curr_image_np: np.ndarray = np.zeros((self.h, self.w, 3), dtype=np.uint8) self.curr_image_torch: torch.Tensor = None self.curr_mask: np.ndarray = np.zeros((self.h, self.w), dtype=np.uint8) self.curr_prob: torch.Tensor = torch.zeros((self.num_objects + 1, self.h, self.w), dtype=torch.float).to(self.device) self.curr_prob[0] = 1 # visualization info self.vis_mode: str = 'davis' self.vis_image: np.ndarray = None self.save_visualization: bool = False self.save_soft_mask: bool = False self.interacted_prob: torch.Tensor = None self.overlay_layer: np.ndarray = None self.overlay_layer_torch: torch.Tensor = None # the object id used for popup/layer overlay self.vis_target_objects = list(range(1, self.num_objects + 1)) self.load_current_image_mask() self.show_current_frame() # initialize stuff self.update_memory_gauges() self.update_gpu_gauges() self.gui.work_mem_min.setValue(self.processor.memory.min_mem_frames) self.gui.work_mem_max.setValue(self.processor.memory.max_mem_frames) self.gui.long_mem_max.setValue(self.processor.memory.max_long_tokens) self.gui.mem_every_box.setValue(self.processor.mem_every) # for exporting videos self.output_fps = cfg['output_fps'] self.output_bitrate = cfg['output_bitrate'] # set callbacks self.gui.on_mouse_motion_xy = self.on_mouse_motion_xy self.gui.click_fn = self.click_fn self.gui.show() self.gui.text('Initialized.') self.initialized = True # try to load the default overlay self._try_load_layer('./docs/uiuc.png') self.gui.set_object_color(self.curr_object) self.update_config() def initialize_networks(self) -> None: download_models_if_needed() self.cutie = CUTIE(self.cfg).eval().to(self.device) model_weights = torch.load(self.cfg.weights, map_location=self.device) self.cutie.load_weights(model_weights) self.click_ctrl = ClickController(self.cfg.ritm_weights, device=self.device) def hit_number_key(self, number: int): if number == self.curr_object: return self.curr_object = number self.gui.object_dial.setValue(number) if self.click_ctrl is not None: self.click_ctrl.unanchor() self.gui.text(f'Current object changed to {number}.') self.gui.set_object_color(number) self.show_current_frame() def click_fn(self, action: Literal['left', 'right', 'middle'], x: int, y: int): 
if self.propagating: return last_interaction = self.interaction new_interaction = None with autocast(self.device, enabled=(self.amp and self.device == 'cuda')): if action in ['left', 'right']: # left: positive click # right: negative click self.convert_current_image_mask_torch() image = self.curr_image_torch if (last_interaction is None or last_interaction.tar_obj != self.curr_object): # create new interaction is needed self.complete_interaction() self.click_ctrl.unanchor() new_interaction = ClickInteraction(image, self.curr_prob, (self.h, self.w), self.click_ctrl, self.curr_object) if new_interaction is not None: self.interaction = new_interaction self.interaction.push_point(x, y, is_neg=(action == 'right')) self.interacted_prob = self.interaction.predict().to(self.device, non_blocking=True) self.update_interacted_mask() self.update_gpu_gauges() elif action == 'middle': # middle: select a new visualization object target_object = self.curr_mask[int(y), int(x)] if target_object in self.vis_target_objects: self.vis_target_objects.remove(target_object) else: self.vis_target_objects.append(target_object) self.gui.text(f'Overlay target(s) changed to {self.vis_target_objects}') self.show_current_frame() return else: raise NotImplementedError def load_current_image_mask(self, no_mask: bool = False): self.curr_image_np = self.res_man.get_image(self.curr_ti) self.curr_image_torch = None if not no_mask: loaded_mask = self.res_man.get_mask(self.curr_ti) if loaded_mask is None: self.curr_mask.fill(0) else: self.curr_mask = loaded_mask.copy() self.curr_prob = None def convert_current_image_mask_torch(self, no_mask: bool = False): if self.curr_image_torch is None: self.curr_image_torch = to_tensor(self.curr_image_np).to(self.device, non_blocking=True) if self.curr_prob is None and not no_mask: self.curr_prob = index_numpy_to_one_hot_torch(self.curr_mask, self.num_objects + 1).to( self.device, non_blocking=True) def compose_current_im(self): self.vis_image = get_visualization(self.vis_mode, self.curr_image_np, self.curr_mask, self.overlay_layer, self.vis_target_objects) def update_canvas(self): self.gui.set_canvas(self.vis_image) def update_current_image_fast(self): # fast path, uses gpu. 
Changes the image in-place to avoid copying # thus current_image_torch must be voided afterwards self.vis_image = get_visualization_torch(self.vis_mode, self.curr_image_torch, self.curr_prob, self.overlay_layer_torch, self.vis_target_objects) self.curr_image_torch = None self.vis_image = np.ascontiguousarray(self.vis_image) if self.save_visualization: self.res_man.save_visualization(self.curr_ti, self.vis_mode, self.vis_image) if self.save_soft_mask: self.res_man.save_soft_mask(self.curr_ti, self.curr_prob.cpu().numpy()) self.gui.set_canvas(self.vis_image) def show_current_frame(self, fast: bool = False): # Re-compute overlay and show the image if fast: self.update_current_image_fast() else: self.compose_current_im() if self.save_visualization: self.res_man.save_visualization(self.curr_ti, self.vis_mode, self.vis_image) self.update_canvas() self.gui.update_slider(self.curr_ti) self.gui.frame_name.setText(self.res_man.names[self.curr_ti] + '.jpg') def set_vis_mode(self): self.vis_mode = self.gui.combo.currentText() self.show_current_frame() def save_current_mask(self): # save mask to hard disk self.res_man.save_mask(self.curr_ti, self.curr_mask) def on_slider_update(self): # if we are propagating, the on_run function will take care of everything # don't do duplicate work here self.curr_ti = self.gui.tl_slider.value() if not self.propagating: # with self.vis_cond: # self.vis_cond.notify() if self.curr_frame_dirty: self.save_current_mask() self.curr_frame_dirty = False self.reset_this_interaction() self.curr_ti = self.gui.tl_slider.value() self.load_current_image_mask() self.show_current_frame() def on_forward_propagation(self): if self.propagating: # acts as a pause button self.propagating = False self.propagate_direction = 'none' else: self.propagate_fn = self.on_next_frame self.gui.forward_propagation_start() self.propagate_direction = 'forward' self.on_propagate() def on_backward_propagation(self): if self.propagating: # acts as a pause button self.propagating = False self.propagate_direction = 'none' else: self.propagate_fn = self.on_prev_frame self.gui.backward_propagation_start() self.propagate_direction = 'backward' self.on_propagate() def on_pause(self): self.propagating = False self.gui.text(f'Propagation stopped at t={self.curr_ti}.') self.gui.pause_propagation() def on_propagate(self): # start to propagate with autocast(self.device, enabled=(self.amp and self.device == 'cuda')): self.convert_current_image_mask_torch() self.gui.text(f'Propagation started at t={self.curr_ti}.') self.processor.clear_sensory_memory() self.curr_prob = self.processor.step(self.curr_image_torch, self.curr_prob[1:], idx_mask=False) self.curr_mask = torch_prob_to_numpy_mask(self.curr_prob) # clear self.interacted_prob = None self.reset_this_interaction() self.show_current_frame(fast=True) self.propagating = True self.gui.clear_all_mem_button.setEnabled(False) self.gui.clear_non_perm_mem_button.setEnabled(False) self.gui.tl_slider.setEnabled(False)
next_line: dataset = PropagationReader(self.res_man, self.curr_ti, self.propagate_direction)
gold_snippet_index: 5
created_at: 2023-10-19 17:49:24+00:00
level: 24k
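The record above comes from an interactive video-segmentation GUI: the forward/backward buttons double as pause toggles, and on_propagate steps a processor frame by frame while the propagating flag stays set. A minimal, framework-free sketch of that toggle-and-propagate control flow; all class and method names below are hypothetical stand-ins, not the project's API:

```python
# Minimal sketch of the pause/propagate toggle seen above. Hypothetical names;
# the real project drives a Qt GUI and a deep model instead of the print stub.
class PropagationController:
    def __init__(self, num_frames: int):
        self.num_frames = num_frames
        self.curr_ti = 0                      # current frame index
        self.propagating = False
        self.propagate_direction = "none"

    def on_forward_propagation(self):
        if self.propagating:                  # same button acts as pause
            self.propagating = False
            self.propagate_direction = "none"
        else:
            self.propagate_direction = "forward"
            self.on_propagate()

    def on_propagate(self):
        self.propagating = True
        step = 1 if self.propagate_direction == "forward" else -1
        while self.propagating and 0 <= self.curr_ti + step < self.num_frames:
            self.curr_ti += step
            self.process_frame(self.curr_ti)  # stand-in for the model step

    def process_frame(self, ti: int):
        print(f"propagated mask to frame {ti}")

ctrl = PropagationController(num_frames=5)
ctrl.on_forward_propagation()                 # propagates frames 1..4
```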
repo_name: ZhengyiLuo/PerpetualHumanoidControl
file_path: scripts/vis/vis_smpl_o3d_multi.py
context: [ { "identifier": "SMPL_Parser", "path": "uhc/smpllib/smpl_parser.py", "snippet": "class SMPL_Parser(_SMPL):\n\n def __init__(self, create_transl=False, *args, **kwargs):\n \"\"\"SMPL model constructor\n Parameters\n ----------\n model_path: str\n The path to the ...
import_statement: import glob import os import sys import pdb import os.path as osp import open3d as o3d import open3d.visualization.rendering as rendering import imageio import joblib import numpy as np import torch import random import matplotlib.pyplot as plt import cv2 import matplotlib as mpl from tqdm import tqdm from uhc.smpllib.smpl_parser import ( SMPL_Parser, SMPLH_Parser, SMPLX_Parser, ) from uhc.smpllib.smpl_mujoco import SMPL_BONE_ORDER_NAMES as joint_names from poselib.poselib.skeleton.skeleton3d import SkeletonTree, SkeletonMotion, SkeletonState from scipy.spatial.transform import Rotation as sRot from tqdm import tqdm
token_num: 19,852
cropped_code:
sys.path.append(os.getcwd()) paused, reset, recording, image_list, writer, control, curr_zoom = False, False, False, [], None, None, 0.01 def pause_func(action): global paused paused = not paused print(f"Paused: {paused}") return True def reset_func(action): global reset reset = not reset print(f"Reset: {reset}") return True def record_func(action): global recording, writer if not recording: fps = 30 curr_video_file_name = "test.mp4" writer = imageio.get_writer(curr_video_file_name, fps=fps, macro_block_size=None) elif not writer is None: writer.close() writer = None recording = not recording print(f"Recording: {recording}") return True def capture_func(action): global capture capture = not capture return True def zoom_func(action): global control, curr_zoom curr_zoom = curr_zoom * 0.9 control.set_zoom(curr_zoom) print(f"Reset: {reset}") return True mujoco_joint_names = ['Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee', 'R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Chest', 'Neck', 'Head', 'L_Thorax', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'R_Hand'] Name = "getting_started" Title = "Getting Started" data_dir = "data/smpl" smpl_parser_n = SMPL_Parser(model_path=data_dir, gender="neutral") smpl_parser_m = SMPL_Parser(model_path=data_dir, gender="male") smpl_parser_f = SMPL_Parser(model_path=data_dir, gender="female") # pkl_dir = "output/renderings/smpl_ego_long_8-2023-01-20-11:28:00.pkl" # pkl_dir = "output/renderings/smpl_im_comp_8-2023-02-05-15:36:14.pkl" pkl_dir = "output/renderings/smpl_im_comp_pnn_3-2023-03-07-14:31:50.pkl" Name = pkl_dir.split("/")[-1].split(".")[0] pkl_data = joblib.load(pkl_dir)
all_code: (verbatim duplicate of cropped_code above; omitted here)
next_line: mujoco_2_smpl = [mujoco_joint_names.index(q) for q in joint_names if q in mujoco_joint_names]
gold_snippet_index: 0
created_at: 2023-10-15 19:05:47+00:00
level: 24k
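The next_line of this record builds an index map between the MuJoCo and SMPL joint orderings with a list comprehension. A self-contained sketch of the same remapping pattern, using shortened hypothetical joint lists in place of the real 24-name sets:

```python
import numpy as np

# Two hypothetical joint orderings; the record uses the full 24-joint
# MuJoCo/SMPL name lists instead of these shortened ones.
mujoco_joint_names = ["Pelvis", "L_Hip", "L_Knee", "R_Hip", "R_Knee", "Head"]
smpl_joint_names = ["Pelvis", "L_Hip", "R_Hip", "L_Knee", "R_Knee", "Head"]

# For each joint in the target order, find its index in the source order.
mujoco_2_smpl = [mujoco_joint_names.index(q) for q in smpl_joint_names
                 if q in mujoco_joint_names]
print(mujoco_2_smpl)                       # [0, 1, 3, 2, 4, 5]

# Reordering per-joint data is then a single fancy-indexing step.
joints_mujoco = np.arange(len(mujoco_joint_names) * 3).reshape(-1, 3)
joints_smpl = joints_mujoco[mujoco_2_smpl]
```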
repo_name: e4s2023/E4S2023
file_path: run_UI_seg19.py
context: [ { "identifier": "UIOptions", "path": "options/ui_options.py", "snippet": "class UIOptions:\n\n\tdef __init__(self):\n\t\tself.parser = ArgumentParser()\n\t\tself.initialize()\n\n\tdef initialize(self):\n\t\tself.parser.add_argument('--exp_dir', type=str, default=\"/apdcephfs/share_1290939/zhianliu/runni...
import_statement: from options.ui_options import UIOptions from PyQt5.QtCore import * from PyQt5.QtGui import * from PyQt5.QtWidgets import * from PyQt5.QtPrintSupport import QPrintDialog, QPrinter from ui_run.ui import Ui_Form from ui_run.mouse_event import GraphicsScene from ui_run.util import number_color, color_pred,celebAHQ_masks_to_faceParser_mask_detailed, my_number_object, COMPS from PIL import Image from PyQt5 import QtGui from models.networks import Net3 from glob import glob from utils import torch_utils from datasets.dataset import CelebAHQDataset, get_transforms, TO_TENSOR, NORMALIZE import sys import cv2 import skimage.io import qdarkstyle import qdarkgraystyle import os import numpy as np import skimage.io import os import torch import copy import torchvision.transforms as transforms
token_num: 18,980
cropped_code:
@pyqtSlot() def change_alpha_value(self): self.alpha = self.alphaSlider.value() / 20 self.alphaLabel.setText('Alpha: %.2f' % self.alpha) @pyqtSlot() def switch_labels(self, label): # 换了一种label颜色按钮 self.scene.label = label self.scene.color = number_color[label] self.color_Button.setStyleSheet("background-color: %s;" % self.scene.color) @pyqtSlot() def undo(self): self.scene.undo() def __init__(self, opt): super().__init__() self.init_deep_model(opt) self.setupUi(self) self.show() # 下面都是一些默认值 self.modes = 0 self.alpha = 1 # 插值的alpha self.ref_style_img_path = None self.mouse_clicked = False self.scene = GraphicsScene(self.modes, self) # 用来编辑的 scene self.scene.setSceneRect(0, 0, 512, 512) self.graphicsView.setScene(self.scene) self.graphicsView.setAlignment(Qt.AlignCenter) self.graphicsView.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff) self.graphicsView.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff) self.result_scene = QGraphicsScene() self.graphicsView_2.setScene(self.result_scene) self.graphicsView_2.setAlignment(Qt.AlignCenter) self.graphicsView_2.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff) self.graphicsView_2.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff) self.GT_scene = QGraphicsScene() self.graphicsView_GT.setScene(self.GT_scene) self.graphicsView_GT.setAlignment(Qt.AlignCenter) self.graphicsView_GT.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff) self.graphicsView_GT.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff) self.dlg = QColorDialog(self.graphicsView) self.init_screen() # 初始化screen def init_screen(self): #self.image = QPixmap(self.graphicsView.size()) self.image = QPixmap(QSize(512, 512)) # 这张是待编辑的mask可视化图片 self.image.fill(QColor('#FFFFFF')) self.mat_img = np.zeros([512, 512, 3], np.uint8) # mask图片, [0-12], 3通道 self.mat_img_org = self.mat_img.copy() self.GT_img_path = None GT_img = np.ones([512, 512, 3], np.uint8)*255 self.GT_img = Image.fromarray(GT_img) self.GT_img = self.GT_img.convert('RGB') #################### add GT image self.update_GT_image(GT_img) ##################### self.scene.reset() if len(self.scene.items()) > 0: self.scene.reset_items() self.scene.addPixmap(self.image) ############### load average features # TODO: 把这两行注释打开 # self.load_average_feature() # self.run_deep_model() self.recorded_img_names = [] self.clean_snapshots() self.clean_generated_result() def init_deep_model(self, opt): # 初始化模型 self.opt = opt assert self.opt.checkpoint_path is not None, "please specify the pre-trained weights!" print("Loading model and weights, please wait a few seconds...") self.net = Net3(self.opt).eval().to(self.opt.device) ckpt_dict=torch.load(self.opt.checkpoint_path) self.net.latent_avg = ckpt_dict['latent_avg'].to(self.opt.device) if self.opt.start_from_latent_avg else None self.net.load_state_dict(torch_utils.remove_module_prefix(ckpt_dict["state_dict"],prefix="module.")) print("Loading Done!") # 固定noise channels = { 4: 512, 8: 512, 16: 512, 32: 512, 64: 256 * 2, 128: 128 * 2, 256: 64 * 2, 512: 32 * 2, 1024: 16 * 2, } self.noise = [torch.randn(1,512,4,4).to(self.opt.device)] for i in [8,16,32,64,128,256,512,1024]: self.noise.append(torch.randn(1,channels[i],i,i).to(self.opt.device)) self.noise.append(torch.randn(1,channels[i],i,i).to(self.opt.device)) # =================================================== def editing(self): # 生成编辑的结果
all_code:
class ExWindow(QMainWindow): def __init__(self, opt): super().__init__() self.EX = Ex(opt) self.setWindowIcon(QtGui.QIcon('ui_run/icons/edit_icon.svg')) class Ex(QWidget, Ui_Form): @pyqtSlot() def change_brush_size(self): # 改变画刷的 粗细 self.scene.brush_size = self.brushSlider.value() self.brushsizeLabel.setText('Brush size: %d' % self.scene.brush_size) [... continues verbatim as cropped_code above ...]
next_line: mat_img_seg12 = celebAHQ_masks_to_faceParser_mask_detailed(self.mat_img[:,:,0])
gold_snippet_index: 3
created_at: 2023-10-15 12:15:01+00:00
level: 24k
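The next_line here converts a 19-class CelebAHQ-style mask into the coarser faceParser label set. One common way to implement such a conversion is a lookup table applied with NumPy fancy indexing; the table entries below are invented for illustration and do not reproduce the project's real mapping:

```python
import numpy as np

def remap_labels(mask: np.ndarray, lut: np.ndarray) -> np.ndarray:
    """Map every pixel's class id through a lookup table."""
    return lut[mask]

# Hypothetical 19 -> coarse mapping; the project's real table differs.
lut = np.zeros(19, dtype=np.uint8)
lut[1] = 1      # e.g. skin -> face region
lut[2] = 2      # e.g. nose -> nose
lut[4] = 3      # e.g. left eye -> eyes
lut[5] = 3      # e.g. right eye -> eyes

mask = np.random.randint(0, 19, size=(4, 4), dtype=np.uint8)
print(remap_labels(mask, lut))
```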
repo_name: sotopia-lab/sotopia
file_path: sotopia/server.py
context: [ { "identifier": "Agents", "path": "sotopia/agents/llm_agent.py", "snippet": "class Agents(dict[str, BaseAgent[Observation, AgentAction]]):\n def reset(self) -> None:\n for agent in self.values():\n agent.reset()\n\n def act(self, obs: dict[str, Observation]) -> dict[str, AgentAct...
import_statement: import asyncio import functools import itertools import logging import gin import rich from typing import Callable, Literal, Sequence, Type, cast from beartype import beartype from tqdm.asyncio import tqdm_asyncio from sotopia.agents import ( Agents, HumanAgent, LLMAgent, RedisAgent, ScriptWritingAgent, SpeakAgent, ) from sotopia.agents.base_agent import BaseAgent from sotopia.database import EpisodeLog from sotopia.database.persistent_profile import ( AgentProfile, EnvironmentProfile, ) from sotopia.envs import ParallelSotopiaEnv from sotopia.envs.evaluators import ( ReachGoalLLMEvaluator, RuleBasedTerminatedEvaluator, unweighted_aggregate_evaluate, ) from sotopia.generation_utils.generate import LLM_Name, agenerate_script from sotopia.messages import AgentAction, Message, Observation from sotopia.messages.message_classes import ( ScriptBackground, ScriptEnvironmentResponse, ScriptInteraction, ) from sotopia.samplers import ( BaseSampler, ConstraintBasedSampler, EnvAgentCombo, UniformSampler, )
token_num: 18,742
cropped_code:
@beartype def run_sync_server( model_name_dict: dict[str, LLM_Name], action_order: Literal["simutaneous", "round-robin", "random"], agents_info: dict[str, dict[str, str]] | None = None, partial_background_file: str | None = None, full_background_file: str | None = None, mode: str | None = None, ) -> list[tuple[str, str, Message]]: # Create Environment and agents # This step will be moved to outside this function env = ParallelSotopiaEnv( model_name=model_name_dict["env"], action_order=action_order, evaluators=[ RuleBasedTerminatedEvaluator(), ], ) if partial_background_file: environment_messages = env.reset( options={"partial_background_file": partial_background_file} ) elif full_background_file: environment_messages = env.reset( options={"full_background_file": full_background_file} ) else: environment_messages = env.reset() agents = Agents() agents_model_names = [model_name_dict["agent1"], model_name_dict["agent2"]] for agent_name, agent_model in zip(env.agents, agents_model_names): if agent_model == "human": agents[agent_name] = HumanAgent(agent_name) elif mode == "speak": agents[agent_name] = SpeakAgent(agent_name, model_name=agent_model) else: agents[agent_name] = LLMAgent(agent_name, model_name=agent_model) agents.reset() messages: list[tuple[str, str, Message]] = [] # Main Event Loop done = False for agent_name in env.agents: messages.append( ("Environment", agent_name, environment_messages[agent_name]) ) while not done: # gather agent messages agent_messages: dict[str, AgentAction] = dict() for agent_name in env.agents: if agents_info is not None: agents[agent_name].goal = agents_info[agent_name]["goal"] agent_messages[agent_name] = agents[agent_name].act( environment_messages[agent_name] ) messages.append( (agent_name, "Environment", agent_messages[agent_name]) ) # send agent messages to environment environment_messages, _, terminated, ___, ____ = env.step( agent_messages ) for agent_name in env.agents: messages.append( ("Environment", agent_name, environment_messages[agent_name]) ) done = all(terminated.values()) return messages @gin.configurable async def arun_one_episode( env: ParallelSotopiaEnv, agent_list: Sequence[BaseAgent[Observation, AgentAction]], model_dict: dict[str, LLM_Name], omniscient: bool = False, script_like: bool = False, json_in_script: bool = False, tag: str | None = None, push_to_db: bool = False, ) -> list[tuple[str, str, Message]]: agents = Agents({agent.agent_name: agent for agent in agent_list}) environment_messages = env.reset(agents=agents, omniscient=omniscient) agents_model_names = [model_dict["agent1"], model_dict["agent2"]] for agent_name, agent_model in zip(env.agents, agents_model_names): if agent_model == "human": agents[agent_name] = HumanAgent(agent_name) elif agent_model == "redis":
all_code: (verbatim duplicate of cropped_code above; omitted here)
next_line: agents[agent_name] = RedisAgent(agent_name)
gold_snippet_index: 5
created_at: 2023-10-23 19:47:26+00:00
level: 24k
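The cropped_code shows this record's main event loop: collect one action per agent, append the messages, step the environment, and stop when every agent is terminated. A stripped-down sketch of that loop with stub Env and Agent classes, both hypothetical stand-ins for the record's LLM agents and Sotopia environment:

```python
# Stub agent and environment; the record wires LLM agents and a Sotopia env
# into this same collect-act-step-terminate skeleton.
class EchoAgent:
    def __init__(self, name):
        self.name = name

    def act(self, obs):
        return f"{self.name} saw '{obs}'"

class CountdownEnv:
    def __init__(self, agents, turns=2):
        self.agents, self.turns = agents, turns

    def reset(self):
        return {a: "start" for a in self.agents}

    def step(self, actions):
        self.turns -= 1
        obs = {a: actions[a] for a in self.agents}
        terminated = {a: self.turns <= 0 for a in self.agents}
        return obs, terminated

env = CountdownEnv(["alice", "bob"])
agents = {name: EchoAgent(name) for name in env.agents}
obs, messages, done = env.reset(), [], False
while not done:
    actions = {name: agents[name].act(obs[name]) for name in env.agents}
    messages.extend((name, "Environment", act) for name, act in actions.items())
    obs, terminated = env.step(actions)
    messages.extend(("Environment", name, obs[name]) for name in env.agents)
    done = all(terminated.values())
print(len(messages), "messages exchanged")
```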
repo_name: shrimo/SLAMBox
file_path: node_graph.py
context: [ { "identifier": "NodeGraph", "path": "NodeGraphQt/NodeGraphQt/base/graph.py", "snippet": "class NodeGraph(QtCore.QObject):\n \"\"\"\n The ``NodeGraph`` class is the main controller for managing all nodes\n and the node graph.\n\n Inherited from: :class:`PySide2.QtCore.QObject`\n\n .. imag...
import_statement: import sys import socket import pickle import json import requests import config as cfg import plugins_ui as plugins from Qt import QtCore, QtWidgets from PySide2.QtGui import QPixmap from NodeGraphQt import NodeGraph, PropertiesBinWidget
token_num: 16,903
cropped_code:
#!/usr/bin/python3.10 """ GUI for video or image analysis and processing Based on the NodeGraphQt a node graph UI framework written in python that can be implemented and re-purposed into applications supporting PySide2. """ # Loading configuration data if cfg.nodegraphqt not in sys.path: sys.path.append(cfg.nodegraphqt) PLUGINS = plugins.PluginRegistration()
all_code: (verbatim duplicate of cropped_code above; omitted here)
next_line: class NodeBased(NodeGraph):
gold_snippet_index: 0
created_at: 2023-10-18 14:11:43+00:00
level: 24k
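The record appends a configured directory to sys.path before importing NodeGraphQt, then (per next_line) subclasses NodeGraph. A small sketch of that configure-path-then-extend pattern with stand-in classes; the path and the stub NodeGraph below are hypothetical:

```python
import sys

EXTRA_PATH = "/opt/NodeGraphQt"        # hypothetical; the record reads cfg.nodegraphqt
if EXTRA_PATH not in sys.path:
    sys.path.append(EXTRA_PATH)        # make the vendored package importable

class NodeGraph:                       # stand-in for NodeGraphQt's controller
    def node_double_clicked(self, node):
        pass

class NodeBased(NodeGraph):
    """Application-specific graph extending the framework controller."""
    def node_double_clicked(self, node):
        print(f"open properties for {node}")

NodeBased().node_double_clicked("DetectorNode")
```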
repo_name: f0uriest/interpax
file_path: tests/test_interpolate.py
context: [ { "identifier": "fft_interp1d", "path": "interpax/_fourier.py", "snippet": "@partial(jit, static_argnames=\"n\")\ndef fft_interp1d(f: jax.Array, n: int, sx: jax.Array = None, dx: float = 1.0):\n \"\"\"Interpolation of a 1d periodic function via FFT.\n\n Parameters\n ----------\n f : ndarray,...
import_statement: import jax import jax.numpy as jnp import numpy as np import pytest from jax import config as jax_config from interpax import ( Interpolator1D, Interpolator2D, Interpolator3D, fft_interp1d, fft_interp2d, interp1d, interp2d, interp3d, )
token_num: 15,785
cropped_code:
"""Ensure monotonic interpolation is actually monotonic.""" # true function is just linear with a jump discontinuity at x=1.5 x = np.linspace(-4, 5, 10) f = np.heaviside(x - 1.5, 0) + 0.1 * x xq = np.linspace(-4, 5, 1000) dfc = interp1d(xq, x, f, derivative=1, method="cubic") dfm = interp1d(xq, x, f, derivative=1, method="monotonic") dfm0 = interp1d(xq, x, f, derivative=1, method="monotonic-0") assert dfc.min() < 0 # cubic interpolation undershoots, giving negative slope assert dfm.min() > 0 # monotonic interpolation doesn't assert dfm0.min() >= 0 # monotonic-0 doesn't overshoot either # ensure monotonic-0 has 0 slope at end points np.testing.assert_allclose(dfm0[np.array([0, -1])], 0, atol=1e-12) class TestInterp2D: """Tests for interp2d function.""" @pytest.mark.unit @pytest.mark.parametrize( "x, y", [ (np.linspace(0, 3 * np.pi, 1000), np.linspace(0, 2 * np.pi, 1000)), (0.0, 0.0), ], ) def test_interp2d(self, x, y): """Test accuracy of different 2d interpolation methods.""" xp = np.linspace(0, 3 * np.pi, 99) yp = np.linspace(0, 2 * np.pi, 40) xxp, yyp = np.meshgrid(xp, yp, indexing="ij") f = lambda x, y: np.sin(x) * np.cos(y) fp = f(xxp, yyp) interp1 = lambda xq, yq, *args, **kwargs: interp2d(xq, yq, *args, **kwargs) interp2 = lambda xq, yq, *args, **kwargs: Interpolator2D(*args, **kwargs)( xq, yq ) for interp in [interp1, interp2]: fq = interp( x, y, xp, yp, fp, method="nearest", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=1e-2, atol=1) fq = interp( x, y, xp, yp, fp, method="linear", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=1e-4, atol=1e-2) atol = 2e-3 rtol = 1e-5 fq = interp(x, y, xp, yp, fp, method="cubic", period=(2 * np.pi, 2 * np.pi)) np.testing.assert_allclose(fq, f(x, y), rtol=rtol, atol=atol) fq = interp( x, y, xp, yp, fp, method="cubic2", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=rtol, atol=atol) fq = interp( x, y, xp, yp, fp, method="catmull-rom", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=rtol, atol=atol) fq = interp( x, y, xp, yp, fp, method="cardinal", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=rtol, atol=atol) @pytest.mark.unit def test_interp2d_vector_valued(self): """Test for interpolating vector valued function.""" xp = np.linspace(0, 3 * np.pi, 99) yp = np.linspace(0, 2 * np.pi, 40) x = np.linspace(0, 3 * np.pi, 200) y = np.linspace(0, 2 * np.pi, 200) xxp, yyp = np.meshgrid(xp, yp, indexing="ij") f = lambda x, y: np.array([np.sin(x) * np.cos(y), np.sin(x) + np.cos(y)]) fp = f(xxp.T, yyp.T).T fq = interp2d(x, y, xp, yp, fp, method="nearest") np.testing.assert_allclose(fq, f(x, y).T, rtol=1e-2, atol=1.2e-1) fq = interp2d(x, y, xp, yp, fp, method="linear") np.testing.assert_allclose(fq, f(x, y).T, rtol=1e-3, atol=1e-2) fq = interp2d(x, y, xp, yp, fp, method="cubic") np.testing.assert_allclose(fq, f(x, y).T, rtol=1e-5, atol=2e-3) class TestInterp3D: """Tests for interp3d function.""" @pytest.mark.unit @pytest.mark.parametrize( "x, y, z", [ ( np.linspace(0, np.pi, 1000), np.linspace(0, 2 * np.pi, 1000), np.linspace(0, 3, 1000), ), (0.0, 0.0, 0.0), ], ) def test_interp3d(self, x, y, z): """Test accuracy of different 3d interpolation methods.""" xp = np.linspace(0, np.pi, 20) yp = np.linspace(0, 2 * np.pi, 30) zp = np.linspace(0, 3, 25) xxp, yyp, zzp = np.meshgrid(xp, yp, zp, indexing="ij") f = lambda x, y, z: np.sin(x) * np.cos(y) * z**2 fp = f(xxp, yyp, zzp) interp1 = lambda xq, yq, zq, *args, **kwargs: 
interp3d( xq, yq, zq, *args, **kwargs )
"""Tests for interpolation functions.""" jax_config.update("jax_enable_x64", True) class TestInterp1D: """Tests for interp1d function.""" @pytest.mark.unit @pytest.mark.parametrize( "x", [ np.linspace(0, 2 * np.pi, 10000), 0.0, ], ) def test_interp1d(self, x): """Test accuracy of different 1d interpolation methods.""" xp = np.linspace(0, 2 * np.pi, 100) f = lambda x: np.sin(x) fp = f(xp) interp1 = lambda xq, *args, **kwargs: interp1d(xq, *args, **kwargs) interp2 = lambda xq, *args, **kwargs: Interpolator1D(*args, **kwargs)(xq) for interp in [interp1, interp2]: fq = interp(x, xp, fp, method="nearest") np.testing.assert_allclose(fq, f(x), rtol=1e-2, atol=1e-1) fq = interp(x, xp, fp, method="linear") np.testing.assert_allclose(fq, f(x), rtol=1e-4, atol=1e-3) fq = interp(x, xp, fp, method="cubic") np.testing.assert_allclose(fq, f(x), rtol=1e-6, atol=1e-5) fq = interp(x, xp, fp, method="cubic2") np.testing.assert_allclose(fq, f(x), rtol=1e-6, atol=1e-5) fq = interp(x, xp, fp, method="cardinal") np.testing.assert_allclose(fq, f(x), rtol=1e-6, atol=1e-5) fq = interp(x, xp, fp, method="catmull-rom") np.testing.assert_allclose(fq, f(x), rtol=1e-6, atol=1e-5) fq = interp(x, xp, fp, method="monotonic") np.testing.assert_allclose(fq, f(x), rtol=1e-4, atol=1e-3) fq = interp(x, xp, fp, method="monotonic-0") np.testing.assert_allclose(fq, f(x), rtol=1e-4, atol=1e-2) @pytest.mark.unit def test_interp1d_vector_valued(self): """Test for interpolating vector valued function.""" xp = np.linspace(0, 2 * np.pi, 100) x = np.linspace(0, 2 * np.pi, 300)[10:-10] f = lambda x: np.array([np.sin(x), np.cos(x)]) fp = f(xp).T fq = interp1d(x, xp, fp, method="nearest") np.testing.assert_allclose(fq, f(x).T, rtol=1e-2, atol=1e-1) fq = interp1d(x, xp, fp, method="linear") np.testing.assert_allclose(fq, f(x).T, rtol=1e-4, atol=1e-3) fq = interp1d(x, xp, fp, method="cubic") np.testing.assert_allclose(fq, f(x).T, rtol=1e-6, atol=1e-5) fq = interp1d(x, xp, fp, method="cubic2") np.testing.assert_allclose(fq, f(x).T, rtol=1e-6, atol=1e-5) fq = interp1d(x, xp, fp, method="cardinal") np.testing.assert_allclose(fq, f(x).T, rtol=1e-6, atol=1e-5) fq = interp1d(x, xp, fp, method="catmull-rom") np.testing.assert_allclose(fq, f(x).T, rtol=1e-6, atol=1e-5) fq = interp1d(x, xp, fp, method="monotonic") np.testing.assert_allclose(fq, f(x).T, rtol=1e-4, atol=1e-3) fq = interp1d(x, xp, fp, method="monotonic-0") np.testing.assert_allclose(fq, f(x).T, rtol=1e-4, atol=1e-2) @pytest.mark.unit def test_interp1d_extrap_periodic(self): """Test extrapolation and periodic BC of 1d interpolation.""" xp = np.linspace(0, 2 * np.pi, 200) x = np.linspace(-1, 2 * np.pi + 1, 10000) f = lambda x: np.sin(x) fp = f(xp) fq = interp1d(x, xp, fp, method="cubic", extrap=False) assert np.isnan(fq[0]) assert np.isnan(fq[-1]) fq = interp1d(x, xp, fp, method="cubic", extrap=True) assert not np.isnan(fq[0]) assert not np.isnan(fq[-1]) fq = interp1d(x, xp, fp, method="cubic", period=2 * np.pi) np.testing.assert_allclose(fq, f(x), rtol=1e-6, atol=1e-2) @pytest.mark.unit def test_interp1d_monotonic(self): """Ensure monotonic interpolation is actually monotonic.""" # true function is just linear with a jump discontinuity at x=1.5 x = np.linspace(-4, 5, 10) f = np.heaviside(x - 1.5, 0) + 0.1 * x xq = np.linspace(-4, 5, 1000) dfc = interp1d(xq, x, f, derivative=1, method="cubic") dfm = interp1d(xq, x, f, derivative=1, method="monotonic") dfm0 = interp1d(xq, x, f, derivative=1, method="monotonic-0") assert dfc.min() < 0 # cubic interpolation undershoots, giving negative slope 
assert dfm.min() > 0 # monotonic interpolation doesn't assert dfm0.min() >= 0 # monotonic-0 doesn't overshoot either # ensure monotonic-0 has 0 slope at end points np.testing.assert_allclose(dfm0[np.array([0, -1])], 0, atol=1e-12) class TestInterp2D: """Tests for interp2d function.""" @pytest.mark.unit @pytest.mark.parametrize( "x, y", [ (np.linspace(0, 3 * np.pi, 1000), np.linspace(0, 2 * np.pi, 1000)), (0.0, 0.0), ], ) def test_interp2d(self, x, y): """Test accuracy of different 2d interpolation methods.""" xp = np.linspace(0, 3 * np.pi, 99) yp = np.linspace(0, 2 * np.pi, 40) xxp, yyp = np.meshgrid(xp, yp, indexing="ij") f = lambda x, y: np.sin(x) * np.cos(y) fp = f(xxp, yyp) interp1 = lambda xq, yq, *args, **kwargs: interp2d(xq, yq, *args, **kwargs) interp2 = lambda xq, yq, *args, **kwargs: Interpolator2D(*args, **kwargs)( xq, yq ) for interp in [interp1, interp2]: fq = interp( x, y, xp, yp, fp, method="nearest", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=1e-2, atol=1) fq = interp( x, y, xp, yp, fp, method="linear", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=1e-4, atol=1e-2) atol = 2e-3 rtol = 1e-5 fq = interp(x, y, xp, yp, fp, method="cubic", period=(2 * np.pi, 2 * np.pi)) np.testing.assert_allclose(fq, f(x, y), rtol=rtol, atol=atol) fq = interp( x, y, xp, yp, fp, method="cubic2", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=rtol, atol=atol) fq = interp( x, y, xp, yp, fp, method="catmull-rom", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=rtol, atol=atol) fq = interp( x, y, xp, yp, fp, method="cardinal", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=rtol, atol=atol) @pytest.mark.unit def test_interp2d_vector_valued(self): """Test for interpolating vector valued function.""" xp = np.linspace(0, 3 * np.pi, 99) yp = np.linspace(0, 2 * np.pi, 40) x = np.linspace(0, 3 * np.pi, 200) y = np.linspace(0, 2 * np.pi, 200) xxp, yyp = np.meshgrid(xp, yp, indexing="ij") f = lambda x, y: np.array([np.sin(x) * np.cos(y), np.sin(x) + np.cos(y)]) fp = f(xxp.T, yyp.T).T fq = interp2d(x, y, xp, yp, fp, method="nearest") np.testing.assert_allclose(fq, f(x, y).T, rtol=1e-2, atol=1.2e-1) fq = interp2d(x, y, xp, yp, fp, method="linear") np.testing.assert_allclose(fq, f(x, y).T, rtol=1e-3, atol=1e-2) fq = interp2d(x, y, xp, yp, fp, method="cubic") np.testing.assert_allclose(fq, f(x, y).T, rtol=1e-5, atol=2e-3) class TestInterp3D: """Tests for interp3d function.""" @pytest.mark.unit @pytest.mark.parametrize( "x, y, z", [ ( np.linspace(0, np.pi, 1000), np.linspace(0, 2 * np.pi, 1000), np.linspace(0, 3, 1000), ), (0.0, 0.0, 0.0), ], ) def test_interp3d(self, x, y, z): """Test accuracy of different 3d interpolation methods.""" xp = np.linspace(0, np.pi, 20) yp = np.linspace(0, 2 * np.pi, 30) zp = np.linspace(0, 3, 25) xxp, yyp, zzp = np.meshgrid(xp, yp, zp, indexing="ij") f = lambda x, y, z: np.sin(x) * np.cos(y) * z**2 fp = f(xxp, yyp, zzp) interp1 = lambda xq, yq, zq, *args, **kwargs: interp3d( xq, yq, zq, *args, **kwargs )
next_line: interp2 = lambda xq, yq, zq, *args, **kwargs: Interpolator3D(*args, **kwargs)(
gold_snippet_index: 4
created_at: 2023-10-18 13:12:20+00:00
level: 24k
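The cropped_code asserts that interpax's monotonic methods never produce negative slope on monotone data while plain cubic interpolation undershoots. The same property can be checked with SciPy's interpolants, used here only to keep the sketch self-contained (interpax itself is not required):

```python
import numpy as np
from scipy.interpolate import CubicSpline, PchipInterpolator

# Same monotone-with-a-jump data as the record's test.
x = np.linspace(-4, 5, 10)
f = np.heaviside(x - 1.5, 0) + 0.1 * x
xq = np.linspace(-4, 5, 1000)

dfc = CubicSpline(x, f)(xq, 1)           # derivative of an ordinary cubic fit
dfm = PchipInterpolator(x, f)(xq, 1)     # shape-preserving (monotone) fit

assert dfc.min() < 0                     # cubic overshoots: slope goes negative
assert dfm.min() >= 0                    # PCHIP keeps the slope non-negative
print("cubic min slope:", dfc.min(), "| pchip min slope:", dfm.min())
```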
repo_name: city96/ComfyUI_ExtraModels
file_path: PixArt/sampler.py
context: [ { "identifier": "gaussian_diffusion", "path": "PixArt/sampling/gaussian_diffusion.py", "snippet": "def mean_flat(tensor):\n def is_vb(self):\ndef _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, warmup_frac):\ndef get_beta_schedule(beta_schedule, *, beta_start, beta_end, num_diffusion_time...
import_statement: import torch import comfy.utils import latent_preview from .sampling import gaussian_diffusion as gd from .sampling.dpm_solver import model_wrapper, DPM_Solver, NoiseScheduleVP from comfy.sample import prepare_sampling, prepare_noise, cleanup_additional_models, get_models_from_cond
token_num: 20,107
cropped_code:
def sample_pixart(model, seed, steps, cfg, noise_schedule, noise_schedule_vp, positive, negative, latent_image): """ Mostly just a wrapper around the reference code. """ # prepare model noise = prepare_noise(latent_image, seed) real_model, _, _, _, models = prepare_sampling(model, noise.shape, positive, negative, noise_mask=None) # negative cond cond = positive[0][0] raw_uncond = negative[0][0] # Sampler seems to want the same dim for cond and uncond # truncate uncond to the length of cond # if shorter, pad uncond with y_null null_y = real_model.diffusion_model.y_embedder.y_embedding[None].repeat(latent_image.shape[0], 1, 1) uncond = null_y[:, :cond.shape[1], :] uncond[:, :raw_uncond.shape[1], :] = raw_uncond[:, :cond.shape[1], :] if raw_uncond.shape[1] > cond.shape[1]: print("PixArt: Warning. Your negative prompt is too long.") uncond[:, -1, :] = raw_uncond[:, -1, :] # add back EOS token # Move inputs cond = cond.to(model.load_device).to(real_model.diffusion_model.dtype) uncond = uncond.to(model.load_device).to(real_model.diffusion_model.dtype) noise = noise.to(model.load_device).to(real_model.diffusion_model.dtype) # preview pbar = comfy.utils.ProgressBar(steps) previewer = latent_preview.get_previewer(model.load_device, model.model.latent_format) ## Noise schedule. betas = torch.tensor(gd.get_named_beta_schedule(noise_schedule, 1000)) noise_schedule = NoiseScheduleVP(schedule=noise_schedule_vp, betas=betas) ## Convert your discrete-time `model` to the continuous-time ## noise prediction model. Here is an example for a diffusion model ## `model` with the noise prediction type ("noise") .
all_code: (verbatim duplicate of cropped_code above; omitted here)
next_line: model_fn = model_wrapper(
gold_snippet_index: 1
created_at: 2023-10-20 21:19:44+00:00
level: 24k
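The cropped_code pads or truncates the negative-prompt embedding so cond and uncond share one sequence length, filling missing positions from a null embedding and keeping the EOS token when truncating. A torch sketch of that shape-matching step, with random tensors and a zero tensor standing in for real embeddings and the learned y_embedding:

```python
import torch

batch, d_model = 1, 8
cond = torch.randn(batch, 6, d_model)          # positive prompt, length 6
raw_uncond = torch.randn(batch, 9, d_model)    # negative prompt, longer here
null_y = torch.zeros(batch, cond.shape[1], d_model)  # stand-in for y_embedding

uncond = null_y.clone()                        # start from null padding
n = min(raw_uncond.shape[1], cond.shape[1])
uncond[:, :n, :] = raw_uncond[:, :n, :]        # copy what fits
if raw_uncond.shape[1] > cond.shape[1]:
    uncond[:, -1, :] = raw_uncond[:, -1, :]    # keep the EOS token on truncation

assert uncond.shape == cond.shape
```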
repo_name: amitfin/oref_alert
file_path: custom_components/oref_alert/config_flow.py
context: [ { "identifier": "CONF_AREAS", "path": "custom_components/oref_alert/const.py", "snippet": "CONF_AREAS: Final = \"areas\"" }, { "identifier": "CONF_ALERT_MAX_AGE", "path": "custom_components/oref_alert/const.py", "snippet": "CONF_ALERT_MAX_AGE: Final = \"alert_max_age\"" }, { "ide...
import_statement: import contextlib import voluptuous as vol import homeassistant.helpers.config_validation as cv from typing import Any from homeassistant.config_entries import ConfigEntry, ConfigFlow, OptionsFlow from homeassistant.core import async_get_hass, callback from homeassistant.data_entry_flow import FlowResult from homeassistant.exceptions import HomeAssistantError from homeassistant.helpers import selector from .const import ( CONF_AREAS, CONF_ALERT_MAX_AGE, CONF_OFF_ICON, CONF_ON_ICON, CONF_POLL_INTERVAL, DEFAULT_ALERT_MAX_AGE, DOMAIN, DEFAULT_OFF_ICON, DEFAULT_ON_ICON, DEFAULT_POLL_INTERVAL, TITLE, ) from .metadata.area_to_polygon import find_area from .metadata.areas_and_groups import AREAS_AND_GROUPS
token_num: 18,888
cropped_code:
"""Config flow for oref_alert integration.""" from __future__ import annotations AREAS_CONFIG = selector.SelectSelectorConfig( options=AREAS_AND_GROUPS, mode=selector.SelectSelectorMode.DROPDOWN, multiple=True, custom_value=False, ) CONFIG_SCHEMA = vol.Schema( {vol.Required(CONF_AREAS, default=[]): selector.SelectSelector(AREAS_CONFIG)} ) class OrefAlertConfigFlow(ConfigFlow, domain=DOMAIN): """Config flow.""" def __init__(self) -> None: """Initialize object with defaults.""" self._auto_detected_area: str | None = None async def async_step_user( self, user_input: dict[str, Any] | None = None ) -> FlowResult: """Handle a flow initialized by the user.""" if self._async_current_entries(): return self.async_abort(reason="single_instance_allowed") if user_input is not None: return await self.async_step_confirm(user_input) hass = None with contextlib.suppress(HomeAssistantError): hass = async_get_hass() if hass: self._auto_detected_area = find_area( hass.config.latitude, hass.config.longitude ) if not self._auto_detected_area: return self.async_show_form(step_id="user", data_schema=CONFIG_SCHEMA) return await self.async_step_confirm(None) async def async_step_confirm( self, user_input: dict[str, Any] | None = None ) -> FlowResult: """Confirm the setup.""" if user_input is not None: return self.async_create_entry( title=TITLE, data={}, options={ CONF_AREAS: user_input.get(CONF_AREAS, [self._auto_detected_area]), CONF_ALERT_MAX_AGE: DEFAULT_ALERT_MAX_AGE, CONF_POLL_INTERVAL: DEFAULT_POLL_INTERVAL,
"""Config flow for oref_alert integration.""" from __future__ import annotations AREAS_CONFIG = selector.SelectSelectorConfig( options=AREAS_AND_GROUPS, mode=selector.SelectSelectorMode.DROPDOWN, multiple=True, custom_value=False, ) CONFIG_SCHEMA = vol.Schema( {vol.Required(CONF_AREAS, default=[]): selector.SelectSelector(AREAS_CONFIG)} ) class OrefAlertConfigFlow(ConfigFlow, domain=DOMAIN): """Config flow.""" def __init__(self) -> None: """Initialize object with defaults.""" self._auto_detected_area: str | None = None async def async_step_user( self, user_input: dict[str, Any] | None = None ) -> FlowResult: """Handle a flow initialized by the user.""" if self._async_current_entries(): return self.async_abort(reason="single_instance_allowed") if user_input is not None: return await self.async_step_confirm(user_input) hass = None with contextlib.suppress(HomeAssistantError): hass = async_get_hass() if hass: self._auto_detected_area = find_area( hass.config.latitude, hass.config.longitude ) if not self._auto_detected_area: return self.async_show_form(step_id="user", data_schema=CONFIG_SCHEMA) return await self.async_step_confirm(None) async def async_step_confirm( self, user_input: dict[str, Any] | None = None ) -> FlowResult: """Confirm the setup.""" if user_input is not None: return self.async_create_entry( title=TITLE, data={}, options={ CONF_AREAS: user_input.get(CONF_AREAS, [self._auto_detected_area]), CONF_ALERT_MAX_AGE: DEFAULT_ALERT_MAX_AGE, CONF_POLL_INTERVAL: DEFAULT_POLL_INTERVAL,
next_line: CONF_ON_ICON: DEFAULT_ON_ICON,
gold_snippet_index: 8
created_at: 2023-10-18 11:16:41+00:00
level: 24k
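The cropped_code probes for a running Home Assistant instance inside contextlib.suppress and only auto-detects an area when both the instance and a polygon match exist, otherwise it falls back to a form. A stripped-down sketch of that try-to-auto-detect-else-ask flow; the stub functions and exception below are hypothetical, not the Home Assistant API:

```python
import contextlib

class ProbeError(Exception):
    pass

def get_runtime():                      # stand-in for async_get_hass()
    raise ProbeError("no running instance")

def find_area(lat, lon):                # stand-in for the polygon lookup
    return None

def start_flow():
    runtime = None
    with contextlib.suppress(ProbeError):
        runtime = get_runtime()
    detected = find_area(32.1, 34.8) if runtime else None
    if not detected:
        return "show_form"              # fall back to asking the user
    return f"confirm:{detected}"

print(start_flow())                     # -> show_form
```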
repo_name: RobertCsordas/moe
file_path: tasks/simple/language_model/transformer_lm_mixin.py
context: [ { "identifier": "TransformerLanguageModel", "path": "models/transformer_language_model.py", "snippet": "class TransformerLanguageModel(LoggingLayer, torch.nn.Module):\n def __init__(self, voc_size: int, embedding_size: Optional[int], state_size: int, dropout: float,\n tied_embedding: ...
import_statement: import framework import torch import torch.nn import torch.nn.functional as F import torch.utils.data import math from typing import List, Tuple, Dict, Any from models import TransformerLanguageModel from ... import task, args from layers.transformer import RelativeTransformerEncoderLayer, PrelnRelativeTransformerEncoderLayer from layers.transformer.relative_preln_kvmem_transformer import PrelnRelativeKVMemTransformerEncoderLayer from layers.transformer.relative_moe_transformer import RelativeMoeTransformerEncoderLayer from layers.transformer.topk_transformer import TopkTransformer from layers.moe_layer import MoE from interfaces import Result
token_num: 18,811
cropped_code:
parser.add_argument("-moe.n_random", default=0) parser.add_argument("-moe.std_correction", default=False) parser.add_argument("-moe.topk_mode", default="full", choice=["full", "l1_approx", "approx"]) parser.add_argument("-moe.activation_after_topk", default=False) parser.add_argument("-moe.weight_grouping", default="none", choice=["none", "keys_only", "keys_and_experts"]) parser.add_argument("-moe.drop_parallel", default=True) parser.add_argument("-moe.mlp_selection", default=False) parser.add_argument("-moe.block_expert_sel_in_grad", default=False) parser.add_argument("-moe.classification_target", default="sum", choice=["sum", "max"]) parser.add_argument("-moe.recluster_steps", default="", parser=parser.int_list_parser) parser.add_argument("-moe.norm_key_init", default=False) parser.add_argument("-moe.norm_value_init", default=False) parser.add_argument("-moe.norm_expert_sel_init", default=False) parser.add_argument("-moe.norm_standard_parallel_values", default=False) parser.add_argument("-moe.identical_init", default=False) parser.add_argument("-moe.topological_sel_reg", default=0.0) parser.add_argument("-moe.topological_expert_reg", default=0.0) parser.add_argument("-moe.sel_lr_multipler", default=1.0) parser.add_argument("-moe.expert_lr_multipler", default=1.0) parser.add_argument("-moe.gumbel_select_only", default=False) parser.add_argument("-moe.topk_value_norm_compensation", default=False) parser.add_argument("-moe.norm_expert_scores", default=False) parser.add_argument("-moe.sel_input_cluster_init", default=False) parser.add_argument("-moe.init_norm_mode", default="full") parser.add_argument("-moe.bias", default=False) parser.add_argument("-moe.sel_bias", default=False) parser.add_argument("-moe.rescale_normed", default=False) parser.add_argument("-moe.sel_norm", default="none", choice=["none", "cos", "input", "weights"]) parser.add_argument("-moe.rescale_grads", default=False) parser.add_argument("-moe.gumbel_decay", default=0) parser.add_argument("-moe.sinkhorn_local", default=False) parser.add_argument("-moe.sinkhron_n_iters", default=3) parser.add_argument("-moe.dropout_factor", default=1.0) parser.add_argument("-moe.drop_expert", default=0.0) parser.add_argument("-moe.expert_size_init", default=False) parser.add_argument("-moe.sync_distributed", default=True) parser.add_argument("-moe.modulation_amplitude", default=0.5) parser.add_argument("-moe.invisible_selection", default=False) parser.add_argument("-moe.slope_multiplier", default=1.0) parser.add_argument("-moe.init_scale", default=1.0) parser.add_argument("-kvmem.linproj", default=False) parser.add_argument("-kvmem.head_merge_topk", default=False) parser.add_argument("-kvmem.load_balance", default=False) parser.add_argument("-kvmem.dropout", default="none", choice=["none", "early", "late", "weight", "score"]) parser.add_argument("-kvmem.randomize_indices", default=False) parser.add_argument("-kvmem.standard_parallel", default=False) parser.add_argument("-kvmem.query_bias", default=False) parser.add_argument("-kvmem.approx_topk", default=False) parser.add_argument("-kvmem.norm_values", default=False) parser.add_argument("-kvmem.factorize", default=False) parser.add_argument("-kvmem.full_key", default=False) parser.add_argument("-kvmem.key_redundancy_factor", default=1) parser.add_argument("-kvmem.two_stage", default=False) parser.add_argument("-kvmem.head_exclusive", default=False) parser.add_argument("-transformer.topk_value", default=32) parser.add_argument("-transformer.universal.nonshared", default=0) 
parser.add_argument("-transformer.topk_use_norm", default=True) parser.add_argument("-transformer.activation", default="relu", choice=["relu", "topk", "gelu", "identity", "sigmoid", "softmax"]) parser.add_argument("-transformer.p_drop_layer", default=0.0) parser.add_argument("-transformer.head_projection_size", default="none", parser=parser.int_or_none_parser) parser.add_argument("-transformer.ln_affine", default=True) parser.add_argument("-transformer.ln_after_attention", default=True) parser.add_argument("-transformer.output_mode", default="normal", choice=["normal", "sum", "geometric", "sigmoid"]) @task() class TransformerLMMixin: helper: framework.helpers.TrainingHelper def is_preln(self) -> bool: return "preln" in self.helper.args.transformer.variant def topk_activation(self, x: torch.Tensor) -> torch.Tensor: nx = -x return torch.masked_fill(x, nx <= nx.kthvalue(self.helper.args.transformer.topk_value, keepdim=True)[0], 0) def get_layers(self) -> List[torch.nn.Module]: # pyright: reportOptionalMemberAccess=false if self.helper.args.transformer.activation == "relu": activation = F.relu elif self.helper.args.transformer.activation == "topk": activation = self.topk_activation elif self.helper.args.transformer.activation == "identity": activation = lambda x: x elif self.helper.args.transformer.activation == "sigmoid": activation = torch.sigmoid elif self.helper.args.transformer.activation == "gelu": activation = F.gelu elif self.helper.args.transformer.activation == "softmax": activation = lambda x: F.softmax(x, dim=-1) else: raise ValueError(f"Invalid activation: {self.helper.args.transformer.activation}") base_args = dict( d_model=self.helper.args.state_size, nhead=self.helper.args.transformer.n_heads, dim_feedforward=int(self.helper.args.state_size * self.helper.args.transformer.ff_multiplier), dropout=self.helper.args.dropout, activation=activation ) extra_args = {} if not self.helper.args.transformer.variant.endswith("_gelu") else { "activation": F.gelu, "drop_expand": False } if self.helper.args.transformer.variant in {"preln_relative"}: mklayer = lambda: PrelnRelativeTransformerEncoderLayer( **base_args, **extra_args, test_pos_clamp=self.helper.args.lm.trafo.test_pos_clamp, n_layers=self.helper.args.transformer.encoder_n_layers, head_projection_size=self.helper.args.transformer.head_projection_size,) elif self.helper.args.transformer.variant in {"preln_topk"}: mklayer = lambda: TopkTransformer( **base_args, **extra_args, test_pos_clamp=self.helper.args.lm.trafo.test_pos_clamp, n_layers=self.helper.args.transformer.encoder_n_layers, k=self.helper.args.transformer.topk_value, use_norm=self.helper.args.transformer.topk_use_norm, head_projection_size=self.helper.args.transformer.head_projection_size,) elif self.helper.args.transformer.variant in {"preln_kvmem"}:
all_code:
@args def a(parser: framework.helpers.ArgumentParser): parser.add_argument("-lm.trafo.context_blocks", default=1) parser.add_argument("-lm.trafo.test_context_blocks", default="none", parser=parser.int_or_none_parser) parser.add_argument("-lm.trafo.test_pos_clamp", default="none", parser=parser.int_or_none_parser) parser.add_argument("-lm.trafo.same_length_eval", default=False) parser.add_argument("-lm.trafo.same_length", default=False) parser.add_argument("-lm.trafo.last_layer_context", default=False) parser.add_argument("-lm.trafo.xl_init", default=False) parser.add_argument("-lm.trafo.embedding_mode_init", default="default", choice=["default", "scale_to_sqrt_dmodel", "init_to_sqrt_dmodel", "one_and_scale_to_sqrt_dmodel", "like_preln"]) parser.add_argument("-pkm.n_keys", default="128", parser=parser.int_list_parser) parser.add_argument("-pkm.n_heads", default=1) parser.add_argument("-pkm.knn", default=32) parser.add_argument("-pkm.stochastic", default=False) parser.add_argument("-pkm.query_batchnorm", default=False) parser.add_argument("-pkm.custom_init", default=0) parser.add_argument("-pkm.slice_values", default=False) parser.add_argument("-pkm.slice_proj", default=False) parser.add_argument("-pkm.sample_smallest", default=False) parser.add_argument("-moe.n_experts", default=128) parser.add_argument("-moe.expert_size", default=128) parser.add_argument("-moe.selection_mode", default="add", choice=["add", "gate", "sigmoid", "gumbel", "hard_gumbel", "predict", "predict_mlp", "classify", "gumbel_sigmoid", "sinkhorn", "sinkhorn2", "sinkmoid", "sinkmax", "moe", "mul", "random", "sinkmoid2", "sinkmax2", "modulate"]) parser.add_argument("-moe.perplexity_reg", default=0.0) parser.add_argument("-moe.perplexity_reg_mode", default="step", choice=["step", "global", "time", "global_time"]) parser.add_argument("-moe.reg_type", default="entropy", choice=["perplexity", "variance", "entropy", "l2", "switch", "normal"]) parser.add_argument("-moe.key_mode", default="moe", choice=["moe", "both", "shared"]) parser.add_argument("-moe.half_key", default=False) parser.add_argument("-moe.norm_keys", default=False) parser.add_argument("-moe.kmeans_distance", default='cosine', choice=['cosine', 'euclidean']) [... continues verbatim as cropped_code above ...]
next_line: mklayer = lambda: PrelnRelativeKVMemTransformerEncoderLayer(
gold_snippet_index: 5
created_at: 2023-10-16 11:26:45+00:00
level: 24k
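The cropped_code registers a "topk" activation alongside relu/gelu and implements it via kthvalue/masked_fill. Below is a common keep-top-k formulation written with torch.topk; it may differ from the record's variant in tie handling and in which side of the threshold is zeroed, so treat it as illustrative only:

```python
import torch

def topk_activation(x: torch.Tensor, k: int) -> torch.Tensor:
    """Zero all but the k largest entries along the last dimension."""
    values, indices = torch.topk(x, k, dim=-1)
    out = torch.zeros_like(x)
    return out.scatter(-1, indices, values)

x = torch.tensor([[0.3, -1.2, 2.0, 0.9]])
print(topk_activation(x, k=2))          # keeps 2.0 and 0.9, zeroes the rest
```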
repo_name: Jacob-Zhou/gecdi
file_path: gec/parser.py
context: [ { "identifier": "Dataset", "path": "gec/data.py", "snippet": "class Dataset(torch.utils.data.Dataset):\n r\"\"\"\n Dataset that is compatible with :class:`torch.utils.data.Dataset`, serving as a wrapper for manipulating all data fields\n with the operating behaviours defined in :class:`~supar.u...
import_statement: import os import shutil import tempfile import math import dill import torch import torch.distributed as dist from datetime import datetime, timedelta from typing import Iterable, Union from gec.data import Dataset from gec.fn import map_token_ids from supar.parser import Parser from supar.utils import Config from supar.utils.common import MIN, NUL, UNK from supar.utils.field import RawField from supar.utils.fn import set_rng_state from supar.utils.logging import get_logger, init_logger, progress_bar from supar.utils.metric import Metric from supar.utils.optim import PolynomialLR from supar.utils.parallel import DistributedDataParallel as DDP, gather, is_dist from supar.utils.parallel import is_master from supar.utils.tokenizer import TransformerTokenizer from supar.utils.transform import AttachJuxtaposeTree, Batch from torch.cuda.amp import GradScaler from torch.optim import AdamW from torch.optim.lr_scheduler import ExponentialLR from torch.nn.functional import embedding from .metric import PerplexityMetric, SpanMetric from .model import Seq2SeqDetectModel, Seq2SeqModel from .transform import Field, Text, Tree from torch.distributed.algorithms.ddp_comm_hooks.default_hooks import fp16_compress_hook from torch.distributed.algorithms.ddp_comm_hooks.default_hooks import fp16_compress_hook from transformers import AutoTokenizer, GPT2LMHeadModel
token_num: 14,429
cropped_code:
self.checkpoint_state_dict.pop('optimizer_state_dict')) self.scheduler.load_state_dict( self.checkpoint_state_dict.pop('scheduler_state_dict')) self.scaler.load_state_dict( self.checkpoint_state_dict.pop('scaler_state_dict')) set_rng_state(self.checkpoint_state_dict.pop('rng_state')) for k, v in self.checkpoint_state_dict.items(): setattr(self, k, v) train.loader.batch_sampler.epoch = self.epoch except AttributeError: logger.warning( "No checkpoint found. Try re-launching the traing procedure instead" ) for epoch in range(self.epoch, args.epochs + 1): start = datetime.now() bar, metric = progress_bar(train.loader), Metric() logger.info(f"Epoch {epoch} / {args.epochs}:") self.model.train() if self.epoch == 1: torch.cuda.empty_cache() with self.join(): # we should zero `step` as the number of batches in different processes is not necessarily equal self.step = 0 for batch in bar: with self.sync(): with torch.autocast(self.device, enabled=self.args.amp): loss = self.train_step(batch) self.backward(loss) if self.sync_grad: self.clip_grad_norm_(self.model.parameters(), self.args.clip) self.scaler.step(self.optimizer) self.scaler.update() self.scheduler.step() self.optimizer.zero_grad(True) bar.set_postfix_str( f"lr: {self.scheduler.get_last_lr()[0]:.4e} - loss: {loss:.4f}" ) self.step += 1 logger.info(f"{bar.postfix}") self.model.eval() with self.join(), torch.autocast(self.device, enabled=self.args.amp): metric = self.reduce( sum([self.eval_step(i) for i in progress_bar(dev.loader)], Metric())) logger.info(f"{'dev:':5} {metric}") if args.test: test_metric = sum( [self.eval_step(i) for i in progress_bar(test.loader)], Metric()) logger.info(f"{'test:':5} {self.reduce(test_metric)}") t = datetime.now() - start self.epoch += 1 self.patience -= 1 self.elapsed += t if metric > self.best_metric: self.best_e, self.patience, self.best_metric = epoch, patience, metric if is_master(): self.save_checkpoint(args.path) logger.info(f"{t}s elapsed (saved)\n") else: logger.info(f"{t}s elapsed\n") if self.patience < 1: break if dist.is_initialized(): dist.barrier() best = self.load(**args) # only allow the master device to save models if is_master(): best.save(args.path) logger.info(f"Epoch {self.best_e} saved") logger.info(f"{'dev:':5} {self.best_metric}") if args.test: best.model.eval() with best.join(): test_metric = sum( [best.eval_step(i) for i in progress_bar(test.loader)], Metric()) logger.info(f"{'test:':5} {best.reduce(test_metric)}") logger.info(f"{self.elapsed}s elapsed, {self.elapsed / epoch}s/epoch") def train_step(self, batch: Batch) -> torch.Tensor: src, tgt, _, src_error, _, tgt_error = batch src_mask, tgt_mask = src.ne(self.args.pad_index), tgt.ne( self.args.pad_index) x = self.model(src) loss = self.model.loss(x, tgt, src_error, tgt_error, src_mask, tgt_mask) return loss @torch.no_grad() def eval_step(self, batch: Batch) -> PerplexityMetric: src, tgt, _, src_error, tgt_error_raw, tgt_error = batch src_mask, tgt_mask = src.ne(self.args.pad_index), tgt.ne( self.args.pad_index) x = self.model(src) loss = self.model.loss(x, tgt, src_error, tgt_error, src_mask, tgt_mask) def error_label_factorize(errors): return sum( [[(i, i + 1, e) for e in eb.split("::")] for i, eb in enumerate(errors) if eb not in {'CORRECT', NUL}], []) ged_golds = [error_label_factorize(e) for e in tgt_error_raw] ged_preds = [ error_label_factorize( [self.TGT_ERROR.vocab[i] for i in e if i >= 0]) for e in self.model.decode(x, tgt, src_mask, tgt_mask).tolist() ]
# -*- coding: utf-8 -*- logger = get_logger(__name__) class Seq2SeqParser(Parser): NAME = 'seq2seq' MODEL = Seq2SeqModel def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.SRC = self.transform.SRC self.TGT = self.transform.TGT def train(self, train: Union[str, Iterable], dev: Union[str, Iterable], test: Union[str, Iterable], epochs: int, patience: int, batch_size: int = 5000, update_steps: int = 1, buckets: int = 32, workers: int = 0, clip: float = 5.0, amp: bool = False, cache: bool = False, verbose: bool = True, **kwargs) -> None: args = self.args.update(locals()) init_logger(logger, verbose=args.verbose) self.transform.train() batch_size = batch_size // update_steps if dist.is_initialized(): batch_size = batch_size // dist.get_world_size() logger.info("Loading the data") if args.cache: args.bin = os.path.join(os.path.dirname(args.path), 'bin') train = Dataset(self.transform, args.train, **args).build(batch_size, buckets, True, dist.is_initialized(), workers, chunk_size=args.chunk_size, seed=args.seed) dev = Dataset(self.transform, args.dev, **args).build(batch_size, buckets, False, dist.is_initialized(), workers) logger.info(f"{'train:':6} {train}") if not args.test: logger.info(f"{'dev:':6} {dev}\n") else: test = Dataset(self.transform, args.test, **args).build(batch_size, buckets, False, dist.is_initialized(), workers) logger.info(f"{'dev:':6} {dev}") logger.info(f"{'test:':6} {test}\n") self.optimizer = AdamW(self.model.parameters(), args.lr, (args.mu, args.nu), args.eps, args.weight_decay) steps = len(train.loader) * epochs // args.update_steps self.scheduler = PolynomialLR(self.optimizer, warmup_steps=self.args.warmup_steps, steps=steps) self.scaler = GradScaler(enabled=args.amp) if dist.is_initialized(): self.model = DDP(self.model, device_ids=[args.local_rank], find_unused_parameters=args.get( 'find_unused_parameters', True)) if args.amp: self.model.register_comm_hook(dist.group.WORLD, fp16_compress_hook) self.step, self.epoch, self.best_e, self.patience, self.n_batches = 1, 1, 1, patience, len( train.loader) self.best_metric, self.elapsed = Metric(), timedelta() if self.args.checkpoint: try: self.optimizer.load_state_dict( self.checkpoint_state_dict.pop('optimizer_state_dict')) self.scheduler.load_state_dict( self.checkpoint_state_dict.pop('scheduler_state_dict')) self.scaler.load_state_dict( self.checkpoint_state_dict.pop('scaler_state_dict')) set_rng_state(self.checkpoint_state_dict.pop('rng_state')) for k, v in self.checkpoint_state_dict.items(): setattr(self, k, v) train.loader.batch_sampler.epoch = self.epoch except AttributeError: logger.warning( "No checkpoint found. 
Try re-launching the traing procedure instead" ) for epoch in range(self.epoch, args.epochs + 1): start = datetime.now() bar, metric = progress_bar(train.loader), Metric() logger.info(f"Epoch {epoch} / {args.epochs}:") self.model.train() if self.epoch == 1: torch.cuda.empty_cache() with self.join(): # we should zero `step` as the number of batches in different processes is not necessarily equal self.step = 0 for batch in bar: with self.sync(): with torch.autocast(self.device, enabled=self.args.amp): loss = self.train_step(batch) self.backward(loss) if self.sync_grad: self.clip_grad_norm_(self.model.parameters(), self.args.clip) self.scaler.step(self.optimizer) self.scaler.update() self.scheduler.step() self.optimizer.zero_grad(True) bar.set_postfix_str( f"lr: {self.scheduler.get_last_lr()[0]:.4e} - loss: {loss:.4f}" ) self.step += 1 logger.info(f"{bar.postfix}") self.model.eval() with self.join(), torch.autocast(self.device, enabled=self.args.amp): metric = self.reduce( sum([self.eval_step(i) for i in progress_bar(dev.loader)], Metric())) logger.info(f"{'dev:':5} {metric}") if args.test: test_metric = sum( [self.eval_step(i) for i in progress_bar(test.loader)], Metric()) logger.info(f"{'test:':5} {self.reduce(test_metric)}") t = datetime.now() - start self.epoch += 1 self.patience -= 1 self.elapsed += t if metric > self.best_metric: self.best_e, self.patience, self.best_metric = epoch, patience, metric if is_master(): self.save_checkpoint(args.path) logger.info(f"{t}s elapsed (saved)\n") else: logger.info(f"{t}s elapsed\n") if self.patience < 1: break if dist.is_initialized(): dist.barrier() best = self.load(**args) # only allow the master device to save models if is_master(): best.save(args.path) logger.info(f"Epoch {self.best_e} saved") logger.info(f"{'dev:':5} {self.best_metric}") if args.test: best.model.eval() with best.join(): test_metric = sum( [best.eval_step(i) for i in progress_bar(test.loader)], Metric()) logger.info(f"{'test:':5} {best.reduce(test_metric)}") logger.info(f"{self.elapsed}s elapsed, {self.elapsed / epoch}s/epoch") def evaluate(self, data: Union[str, Iterable], batch_size: int = 5000, buckets: int = 8, workers: int = 0, amp: bool = False, cache: bool = False, punct: bool = False, tree: bool = True, proj: bool = False, partial: bool = False, verbose: bool = True, **kwargs): return super().evaluate(**Config().update(locals())) def predict(self, data: Union[str, Iterable], pred: str = None, lang: str = None, prob: bool = False, batch_size: int = 5000, buckets: int = 8, workers: int = 0, amp: bool = False, cache: bool = False, tree: bool = True, proj: bool = False, verbose: bool = True, **kwargs): return super().predict(**Config().update(locals())) def train_step(self, batch: Batch) -> torch.Tensor: src, tgt = batch src_mask, tgt_mask = batch.mask, tgt.ne(self.args.pad_index) x = self.model(src) loss = self.model.loss(x, tgt, src_mask, tgt_mask) return loss @torch.no_grad() def eval_step(self, batch: Batch) -> PerplexityMetric: src, tgt = batch src_mask, tgt_mask = batch.mask, tgt.ne(self.args.pad_index) x = self.model(src) loss = self.model.loss(x, tgt, src_mask, tgt_mask) preds = golds = None if self.args.eval_tgt: golds = [(s.values[0], s.values[1]) for s in batch.sentences] preds = [(s.values[0], self.TGT.tokenize.decode(i[0])) for s, i in zip(batch.sentences, self.model.decode(x, batch.mask).tolist()) ] return PerplexityMetric(loss, preds, golds, tgt_mask, not self.args.eval_tgt) @torch.no_grad() def pred_step(self, batch: Batch) -> Batch: src, = batch x = 
self.model(src) tgt = self.model.decode(x, batch.mask) batch.tgt = [[self.TGT.tokenize.decode(cand) for cand in i] for i in tgt.tolist()] return batch @classmethod def build(cls, path, min_freq=2, fix_len=20, **kwargs): r""" Build a brand-new Parser, including initialization of all data fields and model parameters. Args: path (str): The path of the model to be saved. min_freq (str): The minimum frequency needed to include a token in the vocabulary. Default: 2. fix_len (int): The max length of all subword pieces. The excess part of each piece will be truncated. Required if using CharLSTM/BERT. Default: 20. kwargs (dict): A dict holding the unconsumed arguments. """ args = Config(**locals()) os.makedirs(os.path.dirname(path) or './', exist_ok=True) if os.path.exists(path) and not args.build: return cls.load(**args) logger.info("Building the fields") t = TransformerTokenizer(name=args.bart) SRC = Field('src', pad=t.pad, unk=t.unk, bos=t.bos, eos=t.eos, tokenize=t) TGT = Field('tgt', pad=t.pad, unk=t.unk, bos=t.bos, eos=t.eos, tokenize=t) transform = Text(SRC=SRC, TGT=TGT) # share the vocab SRC.vocab = TGT.vocab = t.vocab args.update({ 'n_words': len(SRC.vocab), 'pad_index': SRC.pad_index, 'unk_index': SRC.unk_index, 'bos_index': SRC.bos_index, 'eos_index': SRC.eos_index }) logger.info(f"{transform}") logger.info("Building the model") model = cls.MODEL(**args) logger.info(f"{model}\n") parser = cls(args, model, transform) parser.model.to(parser.device) return parser class Seq2SeqDetector(Seq2SeqParser): NAME = 'seq2seq' MODEL = Seq2SeqDetectModel def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.SRC = self.transform.SRC self.TGT = self.transform.TGT (_, self.TGT_ERROR) = self.transform.TGTERROR def train(self, train: Union[str, Iterable], dev: Union[str, Iterable], test: Union[str, Iterable], epochs: int, patience: int, batch_size: int = 5000, update_steps: int = 1, buckets: int = 32, workers: int = 0, clip: float = 5.0, amp: bool = False, cache: bool = False, verbose: bool = True, **kwargs) -> None: args = self.args.update(locals()) init_logger(logger, verbose=args.verbose) self.transform.train() batch_size = batch_size // update_steps if dist.is_initialized(): batch_size = batch_size // dist.get_world_size() logger.info("Loading the data") if args.cache: if args.bin_path is None: args.bin = os.path.join(os.path.dirname(args.path), 'bin') else: args.bin = args.bin_path train = Dataset(self.transform, args.train, **args).build(batch_size, buckets, True, dist.is_initialized(), workers, chunk_size=args.chunk_size, seed=args.seed) dev = Dataset(self.transform, args.dev, **args).build(batch_size, buckets, False, dist.is_initialized(), workers) logger.info(f"{'train:':6} {train}") if not args.test: logger.info(f"{'dev:':6} {dev}\n") else: test = Dataset(self.transform, args.test, **args).build(batch_size, buckets, False, dist.is_initialized(), workers) logger.info(f"{'dev:':6} {dev}") logger.info(f"{'test:':6} {test}\n") def ged_param(name): if name.startswith("encoder."): return False elif name.startswith("decoder."): return False else: return True no_decay = [] self.optimizer = AdamW([{ 'params': p, 'lr': args.lr * (1 if not ged_param(n) else args.lr_rate), "weight_decay": args.weight_decay if not any(nd in n for nd in no_decay) else 0.0, } for n, p in self.model.named_parameters()], args.lr, (args.mu, args.nu), args.eps, args.weight_decay) self.scheduler = ExponentialLR(self.optimizer, args.decay**(1 / args.decay_steps)) self.scaler = GradScaler(enabled=args.amp) if 
dist.is_initialized(): self.model = DDP(self.model, device_ids=[args.local_rank], find_unused_parameters=args.get( 'find_unused_parameters', True)) if args.amp: self.model.register_comm_hook(dist.group.WORLD, fp16_compress_hook) self.step, self.epoch, self.best_e, self.patience, self.n_batches = 1, 1, 1, patience, len( train.loader) self.best_metric, self.elapsed = Metric(), timedelta() if self.args.checkpoint: try: self.optimizer.load_state_dict( self.checkpoint_state_dict.pop('optimizer_state_dict')) self.scheduler.load_state_dict( self.checkpoint_state_dict.pop('scheduler_state_dict')) self.scaler.load_state_dict( self.checkpoint_state_dict.pop('scaler_state_dict')) set_rng_state(self.checkpoint_state_dict.pop('rng_state')) for k, v in self.checkpoint_state_dict.items(): setattr(self, k, v) train.loader.batch_sampler.epoch = self.epoch except AttributeError: logger.warning( "No checkpoint found. Try re-launching the traing procedure instead" ) for epoch in range(self.epoch, args.epochs + 1): start = datetime.now() bar, metric = progress_bar(train.loader), Metric() logger.info(f"Epoch {epoch} / {args.epochs}:") self.model.train() if self.epoch == 1: torch.cuda.empty_cache() with self.join(): # we should zero `step` as the number of batches in different processes is not necessarily equal self.step = 0 for batch in bar: with self.sync(): with torch.autocast(self.device, enabled=self.args.amp): loss = self.train_step(batch) self.backward(loss) if self.sync_grad: self.clip_grad_norm_(self.model.parameters(), self.args.clip) self.scaler.step(self.optimizer) self.scaler.update() self.scheduler.step() self.optimizer.zero_grad(True) bar.set_postfix_str( f"lr: {self.scheduler.get_last_lr()[0]:.4e} - loss: {loss:.4f}" ) self.step += 1 logger.info(f"{bar.postfix}") self.model.eval() with self.join(), torch.autocast(self.device, enabled=self.args.amp): metric = self.reduce( sum([self.eval_step(i) for i in progress_bar(dev.loader)], Metric())) logger.info(f"{'dev:':5} {metric}") if args.test: test_metric = sum( [self.eval_step(i) for i in progress_bar(test.loader)], Metric()) logger.info(f"{'test:':5} {self.reduce(test_metric)}") t = datetime.now() - start self.epoch += 1 self.patience -= 1 self.elapsed += t if metric > self.best_metric: self.best_e, self.patience, self.best_metric = epoch, patience, metric if is_master(): self.save_checkpoint(args.path) logger.info(f"{t}s elapsed (saved)\n") else: logger.info(f"{t}s elapsed\n") if self.patience < 1: break if dist.is_initialized(): dist.barrier() best = self.load(**args) # only allow the master device to save models if is_master(): best.save(args.path) logger.info(f"Epoch {self.best_e} saved") logger.info(f"{'dev:':5} {self.best_metric}") if args.test: best.model.eval() with best.join(): test_metric = sum( [best.eval_step(i) for i in progress_bar(test.loader)], Metric()) logger.info(f"{'test:':5} {best.reduce(test_metric)}") logger.info(f"{self.elapsed}s elapsed, {self.elapsed / epoch}s/epoch") def train_step(self, batch: Batch) -> torch.Tensor: src, tgt, _, src_error, _, tgt_error = batch src_mask, tgt_mask = src.ne(self.args.pad_index), tgt.ne( self.args.pad_index) x = self.model(src) loss = self.model.loss(x, tgt, src_error, tgt_error, src_mask, tgt_mask) return loss @torch.no_grad() def eval_step(self, batch: Batch) -> PerplexityMetric: src, tgt, _, src_error, tgt_error_raw, tgt_error = batch src_mask, tgt_mask = src.ne(self.args.pad_index), tgt.ne( self.args.pad_index) x = self.model(src) loss = self.model.loss(x, tgt, src_error, tgt_error, 
src_mask, tgt_mask) def error_label_factorize(errors): return sum( [[(i, i + 1, e) for e in eb.split("::")] for i, eb in enumerate(errors) if eb not in {'CORRECT', NUL}], []) ged_golds = [error_label_factorize(e) for e in tgt_error_raw] ged_preds = [ error_label_factorize( [self.TGT_ERROR.vocab[i] for i in e if i >= 0]) for e in self.model.decode(x, tgt, src_mask, tgt_mask).tolist() ]
return SpanMetric(loss, ged_preds, ged_golds)
3
2023-10-18 10:55:33+00:00
24k
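The training loop in the record above drives mixed precision with `torch.autocast` plus a `GradScaler`: forward under autocast, backward on the scaled loss, then a scaler-mediated optimizer step with gradient clipping. A minimal sketch of that update pattern, assuming a single CUDA device and toy model/data; the parser's `self.backward` and `self.clip_grad_norm_` helpers are replaced here by the bare calls they wrap:

import torch
from torch.cuda.amp import GradScaler

model = torch.nn.Linear(10, 2).cuda()
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
scaler = GradScaler(enabled=True)

def train_step(batch: torch.Tensor, targets: torch.Tensor) -> torch.Tensor:
    with torch.autocast("cuda", enabled=True):
        loss = torch.nn.functional.cross_entropy(model(batch), targets)
    scaler.scale(loss).backward()   # backward on the scaled loss
    scaler.unscale_(optimizer)      # so clipping sees the true gradients
    torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=5.0)
    scaler.step(optimizer)          # skipped automatically on inf/nan grads
    scaler.update()                 # adapt the loss scale for the next step
    optimizer.zero_grad(set_to_none=True)
    return loss.detach()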
boppreh/hello_tls
src/hello_tls/protocol.py
[ { "identifier": "Protocol", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class Protocol(Enum):\n # Keep protocols in order of preference.\n TLS1_3 = b\"\\x03\\x04\"\n TLS1_2 = b\"\\x03\\x03\"\n TLS1_1 = b\"\\x03\\x02\"\n TLS1_0 = b\"\\x03\\x01\"\n SSLv3 = b\"\\x03\\x00\"\n...
from typing import Iterator, List, Sequence, Optional, Iterable, Callable, Tuple from contextlib import contextmanager from dataclasses import dataclass from .names_and_numbers import Protocol, RecordType, HandshakeType, CompressionMethod, CipherSuite, ExtensionType, Group, AlertLevel, AlertDescription, PskKeyExchangeMode import logging
14,587
logger = logging.getLogger(__name__) class ScanError(Exception): """ Base error class for errors that occur during scanning. """ pass class ServerAlertError(ScanError):
logger = logging.getLogger(__name__) class ScanError(Exception): """ Base error class for errors that occur during scanning. """ pass class ServerAlertError(ScanError):
def __init__(self, level: AlertLevel, description: AlertDescription):
8
2023-10-21 02:00:13+00:00
24k
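The `next_line` of this record opens `ServerAlertError.__init__(self, level, description)`. One plausible completion is sketched below; the tiny `AlertLevel`/`AlertDescription` enums are stand-ins for the real ones in `names_and_numbers.py`, and the stored attribute names are assumptions made only so the sketch runs on its own:

from enum import Enum

class AlertLevel(Enum):        # stand-in for the real enum
    WARNING = 1
    FATAL = 2

class AlertDescription(Enum):  # stand-in for the real enum
    handshake_failure = 40

class ScanError(Exception):
    """ Base error class for errors that occur during scanning. """

class ServerAlertError(ScanError):
    def __init__(self, level: AlertLevel, description: AlertDescription):
        # Keep the alert details and give the exception a readable message.
        super().__init__(f"server alert: {level.name} {description.name}")
        self.level = level
        self.description = description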
zhaojw1998/AccoMontage-3
arrangement_utils.py
[ { "identifier": "split_phrases", "path": "piano_arranger/acc_utils.py", "snippet": "def split_phrases(segmentation):\n \"\"\"Split a phrase label string into individual phrase meta info\"\"\"\n if '\\n' not in segmentation:\n segmentation += '\\n'\n phrases = []\n lengths = []\n cu...
import os import pretty_midi as pyd import numpy as np import torch import piano_arranger.format_converter as cvt from torch.utils.data import DataLoader from scipy.interpolate import interp1d from tqdm import tqdm from piano_arranger.acc_utils import split_phrases from piano_arranger.models import DisentangleVAE from piano_arranger.AccoMontage import find_by_length, dp_search, re_harmonization, get_texture_filter, ref_spotlight from orchestrator import Slakh2100_Pop909_Dataset, collate_fn, compute_pr_feat, EMBED_PROGRAM_MAPPING, Prior from orchestrator.QA_dataset import SLAKH_CLASS_PROGRAMS from orchestrator.utils import grid2pr, pr2grid, matrix2midi, midi2matrix from orchestrator.prior_dataset import TOTAL_LEN_BIN, ABS_POS_BIN, REL_POS_BIN
18,858
SLAKH_CLASS_MAPPING = {v: k for k, v in EMBED_PROGRAM_MAPPING.items()} def load_premise(DATA_FILE_ROOT, DEVICE): """Load AccoMontage Search Space""" print('Loading AccoMontage piano texture search space. This may take 1 or 2 minutes ...') data = np.load(os.path.join(DATA_FILE_ROOT, 'phrase_data.npz'), allow_pickle=True) melody = data['melody'] acc = data['acc'] chord = data['chord'] vel = data['velocity'] cc = data['cc'] acc_pool = {} for LEN in tqdm(range(2, 13)): (mel, acc_, chord_, vel_, cc_, song_reference) = find_by_length(melody, acc, chord, vel, cc, LEN) acc_pool[LEN] = (mel, acc_, chord_, vel_, cc_, song_reference) texture_filter = get_texture_filter(acc_pool) edge_weights=np.load(os.path.join(DATA_FILE_ROOT, 'edge_weights.npz'), allow_pickle=True) """Load Q&A Prompt Search Space""" print('loading orchestration prompt search space ...') slakh_dir = os.path.join(DATA_FILE_ROOT, 'Slakh2100_inference_set') dataset = Slakh2100_Pop909_Dataset(slakh_dir=slakh_dir, pop909_dir=None, debug_mode=False, split='validation', mode='train') loader = DataLoader(dataset, batch_size=1, shuffle=True, collate_fn=lambda b:collate_fn(b, DEVICE)) REF = [] REF_PROG = [] REF_MIX = [] for (_, prog, function, _, _, _) in loader: prog = prog[0, :] REF.extend([batch for batch in function]) REF_PROG.extend([prog for _ in range(len(function))]) REF_MIX.append(torch.sum(function, dim=1)) REF_MIX = torch.cat(REF_MIX, dim=0) """Initialize orchestration model (Prior + Q&A)""" print('Initialize model ...') prior_model_path = os.path.join(DATA_FILE_ROOT, 'params_prior.pt') QaA_model_path = os.path.join(DATA_FILE_ROOT, 'params_qa.pt') orchestrator = Prior.init_inference_model(prior_model_path, QaA_model_path, DEVICE=DEVICE) orchestrator.to(DEVICE) orchestrator.eval() piano_arranger = DisentangleVAE.init_model(torch.device('cuda')).cuda() piano_arranger.load_state_dict(torch.load(os.path.join(DATA_FILE_ROOT, 'params_reharmonizer.pt'))) print('Finished.') return piano_arranger, orchestrator, (acc_pool, edge_weights, texture_filter), (REF, REF_PROG, REF_MIX) def read_lead_sheet(DEMO_ROOT, SONG_NAME, SEGMENTATION, NOTE_SHIFT, melody_track_ID=0): melody_roll, chord_roll = cvt.leadsheet2matrix(os.path.join(DEMO_ROOT, SONG_NAME, 'lead sheet.mid'), melody_track_ID) assert(len(melody_roll == len(chord_roll))) if NOTE_SHIFT != 0: melody_roll = melody_roll[int(NOTE_SHIFT*4):, :] chord_roll = chord_roll[int(NOTE_SHIFT*4):, :] if len(melody_roll) % 16 != 0: pad_len = (len(melody_roll)//16+1)*16-len(melody_roll) melody_roll = np.pad(melody_roll, ((0, pad_len), (0, 0))) melody_roll[-pad_len:, -1] = 1 chord_roll = np.pad(chord_roll, ((0, pad_len), (0, 0))) chord_roll[-pad_len:, 0] = -1 chord_roll[-pad_len:, -1] = -1 CHORD_TABLE = np.stack([cvt.expand_chord(chord) for chord in chord_roll[::4]], axis=0) LEADSHEET = np.concatenate((melody_roll, chord_roll[:, 1: -1]), axis=-1) #T*142, quantized at 16th query_phrases = split_phrases(SEGMENTATION) #[('A', 8, 0), ('A', 8, 8), ('B', 8, 16), ('B', 8, 24)] midi_len = len(LEADSHEET)//16 anno_len = sum([item[1] for item in query_phrases]) if midi_len > anno_len: LEADSHEET = LEADSHEET[: anno_len*16] CHORD_TABLE = CHORD_TABLE[: anno_len*4] print(f'Mismatch warning: Detect {midi_len} bars in the lead sheet (MIDI) and {anno_len} bars in the provided phrase annotation. 
The lead sheet is truncated to {anno_len} bars.') elif midi_len < anno_len: pad_len = (anno_len - midi_len)*16 LEADSHEET = np.pad(LEADSHEET, ((0, pad_len), (0, 0))) LEADSHEET[-pad_len:, 129] = 1 CHORD_TABLE = np.pad(CHORD_TABLE, ((0, pad_len//4), (0, 0))) CHORD_TABLE[-pad_len//4:, 11] = -1 CHORD_TABLE[-pad_len//4:, -1] = -1 print(f'Mismatch warning: Detect {midi_len} bars in the lead sheet (MIDI) and {anno_len} bars in the provided phrase annotation. The lead sheet is padded to {anno_len} bars.') melody_queries = [] for item in query_phrases: start_bar = item[-1] length = item[-2] segment = LEADSHEET[start_bar*16: (start_bar+length)*16] melody_queries.append(segment) #melody queries: list of T16*142, segmented by phrases return (LEADSHEET, CHORD_TABLE, melody_queries, query_phrases) def piano_arrangement(pianoRoll, chord_table, melody_queries, query_phrases, acc_pool, edge_weights, texture_filter, piano_arranger, PREFILTER, tempo=100): print('Phrasal Unit selection begins:\n\t', f'{len(query_phrases)} phrases in the lead sheet;\n\t', f'set note density filter: {PREFILTER}.') phrase_indice, chord_shift = dp_search( melody_queries, query_phrases, acc_pool, edge_weights, texture_filter, filter_id=PREFILTER) path = phrase_indice[0] shift = chord_shift[0] print('Re-harmonization begins ...') midi_recon, acc = re_harmonization(pianoRoll, chord_table, query_phrases, path, shift, acc_pool, model=piano_arranger, get_est=True, tempo=tempo)
SLAKH_CLASS_MAPPING = {v: k for k, v in EMBED_PROGRAM_MAPPING.items()} def load_premise(DATA_FILE_ROOT, DEVICE): """Load AccoMontage Search Space""" print('Loading AccoMontage piano texture search space. This may take 1 or 2 minutes ...') data = np.load(os.path.join(DATA_FILE_ROOT, 'phrase_data.npz'), allow_pickle=True) melody = data['melody'] acc = data['acc'] chord = data['chord'] vel = data['velocity'] cc = data['cc'] acc_pool = {} for LEN in tqdm(range(2, 13)): (mel, acc_, chord_, vel_, cc_, song_reference) = find_by_length(melody, acc, chord, vel, cc, LEN) acc_pool[LEN] = (mel, acc_, chord_, vel_, cc_, song_reference) texture_filter = get_texture_filter(acc_pool) edge_weights=np.load(os.path.join(DATA_FILE_ROOT, 'edge_weights.npz'), allow_pickle=True) """Load Q&A Prompt Search Space""" print('loading orchestration prompt search space ...') slakh_dir = os.path.join(DATA_FILE_ROOT, 'Slakh2100_inference_set') dataset = Slakh2100_Pop909_Dataset(slakh_dir=slakh_dir, pop909_dir=None, debug_mode=False, split='validation', mode='train') loader = DataLoader(dataset, batch_size=1, shuffle=True, collate_fn=lambda b:collate_fn(b, DEVICE)) REF = [] REF_PROG = [] REF_MIX = [] for (_, prog, function, _, _, _) in loader: prog = prog[0, :] REF.extend([batch for batch in function]) REF_PROG.extend([prog for _ in range(len(function))]) REF_MIX.append(torch.sum(function, dim=1)) REF_MIX = torch.cat(REF_MIX, dim=0) """Initialize orchestration model (Prior + Q&A)""" print('Initialize model ...') prior_model_path = os.path.join(DATA_FILE_ROOT, 'params_prior.pt') QaA_model_path = os.path.join(DATA_FILE_ROOT, 'params_qa.pt') orchestrator = Prior.init_inference_model(prior_model_path, QaA_model_path, DEVICE=DEVICE) orchestrator.to(DEVICE) orchestrator.eval() piano_arranger = DisentangleVAE.init_model(torch.device('cuda')).cuda() piano_arranger.load_state_dict(torch.load(os.path.join(DATA_FILE_ROOT, 'params_reharmonizer.pt'))) print('Finished.') return piano_arranger, orchestrator, (acc_pool, edge_weights, texture_filter), (REF, REF_PROG, REF_MIX) def read_lead_sheet(DEMO_ROOT, SONG_NAME, SEGMENTATION, NOTE_SHIFT, melody_track_ID=0): melody_roll, chord_roll = cvt.leadsheet2matrix(os.path.join(DEMO_ROOT, SONG_NAME, 'lead sheet.mid'), melody_track_ID) assert(len(melody_roll == len(chord_roll))) if NOTE_SHIFT != 0: melody_roll = melody_roll[int(NOTE_SHIFT*4):, :] chord_roll = chord_roll[int(NOTE_SHIFT*4):, :] if len(melody_roll) % 16 != 0: pad_len = (len(melody_roll)//16+1)*16-len(melody_roll) melody_roll = np.pad(melody_roll, ((0, pad_len), (0, 0))) melody_roll[-pad_len:, -1] = 1 chord_roll = np.pad(chord_roll, ((0, pad_len), (0, 0))) chord_roll[-pad_len:, 0] = -1 chord_roll[-pad_len:, -1] = -1 CHORD_TABLE = np.stack([cvt.expand_chord(chord) for chord in chord_roll[::4]], axis=0) LEADSHEET = np.concatenate((melody_roll, chord_roll[:, 1: -1]), axis=-1) #T*142, quantized at 16th query_phrases = split_phrases(SEGMENTATION) #[('A', 8, 0), ('A', 8, 8), ('B', 8, 16), ('B', 8, 24)] midi_len = len(LEADSHEET)//16 anno_len = sum([item[1] for item in query_phrases]) if midi_len > anno_len: LEADSHEET = LEADSHEET[: anno_len*16] CHORD_TABLE = CHORD_TABLE[: anno_len*4] print(f'Mismatch warning: Detect {midi_len} bars in the lead sheet (MIDI) and {anno_len} bars in the provided phrase annotation. 
The lead sheet is truncated to {anno_len} bars.') elif midi_len < anno_len: pad_len = (anno_len - midi_len)*16 LEADSHEET = np.pad(LEADSHEET, ((0, pad_len), (0, 0))) LEADSHEET[-pad_len:, 129] = 1 CHORD_TABLE = np.pad(CHORD_TABLE, ((0, pad_len//4), (0, 0))) CHORD_TABLE[-pad_len//4:, 11] = -1 CHORD_TABLE[-pad_len//4:, -1] = -1 print(f'Mismatch warning: Detect {midi_len} bars in the lead sheet (MIDI) and {anno_len} bars in the provided phrase annotation. The lead sheet is padded to {anno_len} bars.') melody_queries = [] for item in query_phrases: start_bar = item[-1] length = item[-2] segment = LEADSHEET[start_bar*16: (start_bar+length)*16] melody_queries.append(segment) #melody queries: list of T16*142, segmented by phrases return (LEADSHEET, CHORD_TABLE, melody_queries, query_phrases) def piano_arrangement(pianoRoll, chord_table, melody_queries, query_phrases, acc_pool, edge_weights, texture_filter, piano_arranger, PREFILTER, tempo=100): print('Phrasal Unit selection begins:\n\t', f'{len(query_phrases)} phrases in the lead sheet;\n\t', f'set note density filter: {PREFILTER}.') phrase_indice, chord_shift = dp_search( melody_queries, query_phrases, acc_pool, edge_weights, texture_filter, filter_id=PREFILTER) path = phrase_indice[0] shift = chord_shift[0] print('Re-harmonization begins ...') midi_recon, acc = re_harmonization(pianoRoll, chord_table, query_phrases, path, shift, acc_pool, model=piano_arranger, get_est=True, tempo=tempo)
acc = np.array([grid2pr(matrix) for matrix in acc])
13
2023-10-23 12:36:57+00:00
24k
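`read_lead_sheet` in the record above zero-pads the melody and chord rolls so their length becomes a whole number of bars (16 sixteenth-note steps per bar, 142 feature columns). The same pad-to-multiple pattern in isolation; the function name and shapes are illustrative:

import numpy as np

def pad_to_bars(roll: np.ndarray, steps_per_bar: int = 16) -> np.ndarray:
    # Zero-pad along the time axis up to the next multiple of steps_per_bar.
    remainder = len(roll) % steps_per_bar
    if remainder == 0:
        return roll
    return np.pad(roll, ((0, steps_per_bar - remainder), (0, 0)))

roll = np.zeros((35, 142))                    # 35 sixteenth-note steps
assert pad_to_bars(roll).shape == (48, 142)   # padded up to 3 full bars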
liuqidong07/MOELoRA-peft
src/MLoRA/peft/peft_model.py
[ { "identifier": "PeftConfig", "path": "src/MLoRA/peft/utils/config.py", "snippet": "class PeftConfig(PeftConfigMixin):\n \"\"\"\n This is the base configuration class to store the configuration of a [`PeftModel`].\n\n Args:\n peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The...
import inspect import os import warnings import torch import torch.nn as nn from contextlib import contextmanager from accelerate import dispatch_model, infer_auto_device_map from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_submodules from accelerate.utils import get_balanced_memory from huggingface_hub import hf_hub_download from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from transformers import PreTrainedModel from transformers.modeling_outputs import SequenceClassifierOutput, TokenClassifierOutput from transformers.utils import PushToHubMixin from .utils import PeftConfig from .shared import Gate, GateN from .tuners import ( AdaLoraModel, AdaptionPromptModel, LoraModel, PrefixEncoder, PromptEmbedding, PromptEncoder, MMOELoraModelS, ) from .utils import ( TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING, WEIGHTS_NAME, PeftConfig, PeftType, PromptLearningConfig, TaskType, _set_adapter, _set_trainable, get_peft_model_state_dict, set_peft_model_state_dict, shift_tokens_right, ) from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING, PEFT_TYPE_TO_CONFIG_MAPPING
16,973
) model_kwargs["past_key_values"] = past_key_values else: if model_kwargs["past_key_values"] is None: inputs_embeds = self.word_embeddings(model_kwargs["input_ids"]) prompts = self.get_prompt(batch_size=model_kwargs["input_ids"].shape[0]) prompts = prompts.to(inputs_embeds.dtype) model_kwargs["inputs_embeds"] = torch.cat((prompts, inputs_embeds), dim=1) model_kwargs["input_ids"] = None return model_kwargs class PeftModelForSeq2SeqLM(PeftModel): """ Peft model for sequence-to-sequence language modeling. Args: model ([`~transformers.PreTrainedModel`]): Base transformer model. peft_config ([`PeftConfig`]): Peft config. Example: ```py >>> from transformers import AutoModelForSeq2SeqLM >>> from peft import PeftModelForSeq2SeqLM, get_peft_config >>> config = { ... "peft_type": "LORA", ... "task_type": "SEQ_2_SEQ_LM", ... "inference_mode": False, ... "r": 8, ... "target_modules": ["q", "v"], ... "lora_alpha": 32, ... "lora_dropout": 0.1, ... "merge_weights": False, ... "fan_in_fan_out": False, ... "enable_lora": None, ... "bias": "none", ... } >>> peft_config = get_peft_config(config) >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> peft_model = PeftModelForSeq2SeqLM(model, peft_config) >>> peft_model.print_trainable_parameters() trainable params: 884736 || all params: 223843584 || trainable%: 0.3952474242013566 ``` """ def __init__(self, model, peft_config: PeftConfig, adapter_name="default"): super().__init__(model, peft_config, adapter_name) self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation self.base_model_prepare_encoder_decoder_kwargs_for_generation = ( self.base_model._prepare_encoder_decoder_kwargs_for_generation ) def forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, decoder_input_ids=None, decoder_attention_mask=None, decoder_inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, ): peft_config = self.active_peft_config if not isinstance(peft_config, PromptLearningConfig): return self.base_model( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_inputs_embeds=decoder_inputs_embeds, labels=labels, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) batch_size = input_ids.shape[0] if decoder_attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(self.device) decoder_attention_mask = torch.cat((prefix_attention_mask, decoder_attention_mask), dim=1) if kwargs.get("position_ids", None) is not None: warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.") kwargs["position_ids"] = None if kwargs.get("token_type_ids", None) is not None: warnings.warn("Token type ids are not supported for parameter efficient tuning. 
Ignoring token type ids") kwargs["token_type_ids"] = None kwargs.update( { "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "labels": labels, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, } ) if peft_config.peft_type == PeftType.PREFIX_TUNING: past_key_values = self.get_prompt(batch_size) return self.base_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, past_key_values=past_key_values, **kwargs ) else: if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) if decoder_inputs_embeds is None and decoder_input_ids is None:
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. PEFT_TYPE_TO_MODEL_MAPPING = { PeftType.LORA: LoraModel, PeftType.PROMPT_TUNING: PromptEmbedding, PeftType.P_TUNING: PromptEncoder, PeftType.PREFIX_TUNING: PrefixEncoder, PeftType.ADALORA: AdaLoraModel, PeftType.ADAPTION_PROMPT: AdaptionPromptModel, PeftType.MMOELORAS: MMOELoraModelS, } class PeftModel(PushToHubMixin, torch.nn.Module): """ Base model encompassing various Peft methods. Args: model ([`~transformers.PreTrainedModel`]): The base transformer model used for Peft. peft_config ([`PeftConfig`]): The configuration of the Peft model. **Attributes**: - **base_model** ([`~transformers.PreTrainedModel`]) -- The base transformer model used for Peft. - **peft_config** ([`PeftConfig`]) -- The configuration of the Peft model. - **modules_to_save** (`list` of `str`) -- The list of sub-module names to save when saving the model. - **prompt_encoder** ([`PromptEncoder`]) -- The prompt encoder used for Peft if using [`PromptLearningConfig`]. - **prompt_tokens** (`torch.Tensor`) -- The virtual prompt tokens used for Peft if using [`PromptLearningConfig`]. - **transformer_backbone_name** (`str`) -- The name of the transformer backbone in the base model if using [`PromptLearningConfig`]. - **word_embeddings** (`torch.nn.Embedding`) -- The word embeddings of the transformer backbone in the base model if using [`PromptLearningConfig`]. """ def __init__(self, model, peft_config: PeftConfig, adapter_name="default"): super().__init__() self.base_model = model self.config = self.base_model.config self.modules_to_save = None self.peft_config = {} self.active_adapter = adapter_name self.peft_type = peft_config.peft_type self.base_model_torch_dtype = getattr(model, "dtype", None) if not isinstance(peft_config, PromptLearningConfig): self.peft_config[adapter_name] = peft_config self.base_model = PEFT_TYPE_TO_MODEL_MAPPING[peft_config.peft_type]( self.base_model, self.peft_config, adapter_name ) self.set_additional_trainable_modules(peft_config, adapter_name) else: self.add_adapter(adapter_name, peft_config) def save_pretrained(self, save_directory, **kwargs): r""" This function saves the adapter model and the adapter configuration files to a directory, so that it can be reloaded using the [`LoraModel.from_pretrained`] class method, and also used by the [`LoraModel.push_to_hub`] method. Args: save_directory (`str`): Directory where the adapter model and configuration files will be saved (will be created if it does not exist). kwargs (additional keyword arguments, *optional*): Additional keyword arguments passed along to the `push_to_hub` method. 
""" if os.path.isfile(save_directory): raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file") os.makedirs(save_directory, exist_ok=True) for adapter_name, peft_config in self.peft_config.items(): # save only the trainable weights output_state_dict = get_peft_model_state_dict( self, state_dict=kwargs.get("state_dict", None), adapter_name=adapter_name ) # save the weights based on the adapter name output_dir = os.path.join(save_directory, adapter_name) if adapter_name != "default" else save_directory os.makedirs(output_dir, exist_ok=True) torch.save(output_state_dict, os.path.join(output_dir, WEIGHTS_NAME)) # save the config and change the inference mode to `True` if peft_config.base_model_name_or_path is None: peft_config.base_model_name_or_path = ( self.base_model.__dict__.get("name_or_path", None) if isinstance(peft_config, PromptLearningConfig) else self.base_model.model.__dict__.get("name_or_path", None) ) inference_mode = peft_config.inference_mode peft_config.inference_mode = True peft_config.save_pretrained(output_dir) # save the config to file peft_config.inference_mode = inference_mode @classmethod def from_pretrained(cls, model, model_id, adapter_name="default", is_trainable=False, **kwargs): r""" Instantiate a [`LoraModel`] from a pretrained Lora configuration and weights. Args: model ([`~transformers.PreTrainedModel`]): The model to be adapted. The model should be initialized with the [`~transformers.PreTrainedModel.from_pretrained`] method from the 🤗 Transformers library. model_id (`str` or `os.PathLike`): The name of the Lora configuration to use. Can be either: - A string, the `model id` of a Lora configuration hosted inside a model repo on the Hugging Face Hub. - A path to a directory containing a Lora configuration file saved using the `save_pretrained` method (`./my_lora_config_directory/`). 
""" # load the config config = PEFT_TYPE_TO_CONFIG_MAPPING[ PeftConfig.from_pretrained(model_id, subfolder=kwargs.get("subfolder", None)).peft_type ].from_pretrained(model_id, subfolder=kwargs.get("subfolder", None)) if (getattr(model, "hf_device_map", None) is not None) and len( set(model.hf_device_map.values()).intersection({"cpu", "disk"}) ) > 0: remove_hook_from_submodules(model) if isinstance(config, PromptLearningConfig) and is_trainable: raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.") else: config.inference_mode = not is_trainable if config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys(): model = cls(model, config, adapter_name) else: model = MODEL_TYPE_TO_PEFT_MODEL_MAPPING[config.task_type](model, config, adapter_name) # New a PeftModel model.load_adapter(model_id, adapter_name, **kwargs) return model def _setup_prompt_encoder(self, adapter_name): config = self.peft_config[adapter_name] self.prompt_encoder = torch.nn.ModuleDict({}) self.prompt_tokens = {} transformer_backbone = None for name, module in self.base_model.named_children(): for param in module.parameters(): param.requires_grad = False if isinstance(module, PreTrainedModel): # Make sure to freeze Tranformers model if transformer_backbone is None: transformer_backbone = module self.transformer_backbone_name = name if config.num_transformer_submodules is None: config.num_transformer_submodules = 2 if config.task_type == TaskType.SEQ_2_SEQ_LM else 1 for named_param, value in list(transformer_backbone.named_parameters()): if value.shape[0] == self.base_model.config.vocab_size: self.word_embeddings = transformer_backbone.get_submodule(named_param.replace(".weight", "")) break if config.peft_type == PeftType.PROMPT_TUNING: prompt_encoder = PromptEmbedding(config, self.word_embeddings) elif config.peft_type == PeftType.P_TUNING: prompt_encoder = PromptEncoder(config) elif config.peft_type == PeftType.PREFIX_TUNING: prompt_encoder = PrefixEncoder(config) else: raise ValueError("Not supported") self.prompt_encoder.update(torch.nn.ModuleDict({adapter_name: prompt_encoder})) self.prompt_tokens[adapter_name] = torch.arange( config.num_virtual_tokens * config.num_transformer_submodules ).long() def get_prompt_embedding_to_save(self, adapter_name): """ Returns the prompt embedding to save when saving the model. Only applicable when `peft_config.peft_type != PeftType.LORA`. """ prompt_tokens = self.prompt_tokens[adapter_name].unsqueeze(0).expand(1, -1).to(self.device) if self.peft_config[adapter_name].peft_type == PeftType.PREFIX_TUNING: prompt_tokens = prompt_tokens[:, : self.peft_config[adapter_name].num_virtual_tokens] prompt_embeddings = self.prompt_encoder[adapter_name](prompt_tokens) return prompt_embeddings[0].detach().cpu() def get_prompt(self, batch_size): """ Returns the virtual prompts to use for Peft. Only applicable when `peft_config.peft_type != PeftType.LORA`. 
""" peft_config = self.active_peft_config prompt_encoder = self.prompt_encoder[self.active_adapter] prompt_tokens = self.prompt_tokens[self.active_adapter].unsqueeze(0).expand(batch_size, -1).to(self.device) if peft_config.peft_type == PeftType.PREFIX_TUNING: prompt_tokens = prompt_tokens[:, : peft_config.num_virtual_tokens] if peft_config.inference_mode: past_key_values = prompt_encoder.embedding.weight.repeat(batch_size, 1, 1) else: past_key_values = prompt_encoder(prompt_tokens) past_key_values = past_key_values.view( batch_size, peft_config.num_virtual_tokens, peft_config.num_layers * 2, peft_config.num_attention_heads, peft_config.token_dim // peft_config.num_attention_heads, ) if peft_config.num_transformer_submodules == 2: past_key_values = torch.cat([past_key_values, past_key_values], dim=2) past_key_values = past_key_values.permute([2, 0, 3, 1, 4]).split( peft_config.num_transformer_submodules * 2 ) if TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING.get(self.config.model_type, None) is not None: post_process_fn = TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING[self.config.model_type] past_key_values = post_process_fn(past_key_values) return past_key_values else: if peft_config.inference_mode: prompts = prompt_encoder.embedding.weight.repeat(batch_size, 1, 1) else: prompts = prompt_encoder(prompt_tokens) return prompts def print_trainable_parameters(self): """ Prints the number of trainable parameters in the model. """ trainable_params = 0 all_param = 0 for _, param in self.named_parameters(): num_params = param.numel() # if using DS Zero 3 and the weights are initialized empty if num_params == 0 and hasattr(param, "ds_numel"): num_params = param.ds_numel all_param += num_params if param.requires_grad: trainable_params += num_params print( f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}" ) def __getattr__(self, name: str): """Forward missing attributes to the wrapped module.""" try: return super().__getattr__(name) # defer to nn.Module's logic except AttributeError: return getattr(self.base_model, name) def forward(self, *args, **kwargs): """ Forward pass of the model. """ return self.get_base_model()(*args, **kwargs) @contextmanager def disable_adapter(self): """ Disables the adapter module. """ try: if isinstance(self.peft_config, PromptLearningConfig): old_forward = self.forward self.forward = self.base_model.forward else: self.base_model.disable_adapter_layers() yield finally: if isinstance(self.peft_config, PromptLearningConfig): self.forward = old_forward else: self.base_model.enable_adapter_layers() def get_base_model(self): """ Returns the base model. """ return self.base_model if isinstance(self.active_peft_config, PromptLearningConfig) else self.base_model.model def add_adapter(self, adapter_name, peft_config): if peft_config.peft_type != self.peft_type: raise ValueError( f"Cannot combine adapters with different peft types. " f"Found {self.peft_type} and {peft_config.peft_type}." 
) self.peft_config[adapter_name] = peft_config if isinstance(peft_config, PromptLearningConfig): self._setup_prompt_encoder(adapter_name) else: self.base_model.add_adapter(adapter_name, peft_config) self.set_additional_trainable_modules(peft_config, adapter_name) def set_additional_trainable_modules(self, peft_config, adapter_name): if getattr(peft_config, "modules_to_save", None) is not None: if self.modules_to_save is None: self.modules_to_save = set(peft_config.modules_to_save) else: self.modules_to_save.update(peft_config.modules_to_save) _set_trainable(self, adapter_name) def load_adapter(self, model_id, adapter_name, is_trainable=False, **kwargs): if adapter_name not in self.peft_config: # load the config peft_config = PEFT_TYPE_TO_CONFIG_MAPPING[ PeftConfig.from_pretrained(model_id, subfolder=kwargs.get("subfolder", None)).peft_type ].from_pretrained(model_id, subfolder=kwargs.get("subfolder", None)) if isinstance(peft_config, PromptLearningConfig) and is_trainable: raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.") else: peft_config.inference_mode = not is_trainable self.add_adapter(adapter_name, peft_config) # load weights if any path = os.path.join(model_id, kwargs["subfolder"]) if kwargs.get("subfolder", None) is not None else model_id if os.path.exists(os.path.join(path, WEIGHTS_NAME)): filename = os.path.join(path, WEIGHTS_NAME) else: try: filename = hf_hub_download(model_id, WEIGHTS_NAME, subfolder=kwargs.get("subfolder", None)) except: # noqa raise ValueError( f"Can't find weights for {model_id} in {model_id} or in the Hugging Face Hub. " f"Please check that the file {WEIGHTS_NAME} is present at {model_id}." ) adapters_weights = torch.load( filename, map_location=torch.device("cuda" if torch.cuda.is_available() else "cpu") ) # load the weights into the model set_peft_model_state_dict(self, adapters_weights, adapter_name=adapter_name) if ( (getattr(self, "hf_device_map", None) is not None) and (len(set(self.hf_device_map.values()).intersection({"cpu", "disk"})) > 0) and len(self.peft_config) == 1 ): device_map = kwargs.get("device_map", "auto") max_memory = kwargs.get("max_memory", None) offload_dir = kwargs.get("offload_folder", None) offload_index = kwargs.get("offload_index", None) dispatch_model_kwargs = {} # Safety checker for previous `accelerate` versions # `offload_index` was introduced in https://github.com/huggingface/accelerate/pull/873/ if "offload_index" in inspect.signature(dispatch_model).parameters: dispatch_model_kwargs["offload_index"] = offload_index no_split_module_classes = self._no_split_modules if device_map != "sequential": max_memory = get_balanced_memory( self, max_memory=max_memory, no_split_module_classes=no_split_module_classes, low_zero=(device_map == "balanced_low_0"), ) if isinstance(device_map, str): device_map = infer_auto_device_map( self, max_memory=max_memory, no_split_module_classes=no_split_module_classes ) dispatch_model( self, device_map=device_map, offload_dir=offload_dir, **dispatch_model_kwargs, ) hook = AlignDevicesHook(io_same_device=True) if isinstance(self.peft_config[adapter_name], PromptLearningConfig): remove_hook_from_submodules(self.prompt_encoder) add_hook_to_module(self.get_base_model(), hook) # Set model in evaluation mode to deactivate Dropout modules by default self.eval() def set_adapter(self, adapter_name): """ Sets the active adapter. 
""" if adapter_name not in self.peft_config: raise ValueError(f"Adapter {adapter_name} not found.") self.active_adapter = adapter_name if not isinstance(self.peft_config[adapter_name], PromptLearningConfig): self.base_model.set_adapter(adapter_name) _set_adapter(self, adapter_name) @property def active_peft_config(self): return self.peft_config[self.active_adapter] class PeftModelForSequenceClassification(PeftModel): """ Peft model for sequence classification tasks. Args: model ([`~transformers.PreTrainedModel`]): Base transformer model. peft_config ([`PeftConfig`]): Peft config. **Attributes**: - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model. - **cls_layer_name** (`str`) -- The name of the classification layer. Example: ```py >>> from transformers import AutoModelForSequenceClassification >>> from peft import PeftModelForSequenceClassification, get_peft_config >>> config = { ... "peft_type": "PREFIX_TUNING", ... "task_type": "SEQ_CLS", ... "inference_mode": False, ... "num_virtual_tokens": 20, ... "token_dim": 768, ... "num_transformer_submodules": 1, ... "num_attention_heads": 12, ... "num_layers": 12, ... "encoder_hidden_size": 768, ... "prefix_projection": False, ... "postprocess_past_key_value_function": None, ... } >>> peft_config = get_peft_config(config) >>> model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased") >>> peft_model = PeftModelForSequenceClassification(model, peft_config) >>> peft_model.print_trainable_parameters() trainable params: 370178 || all params: 108680450 || trainable%: 0.3406113979101117 ``` """ def __init__(self, model, peft_config: PeftConfig, adapter_name="default"): super().__init__(model, peft_config, adapter_name) if self.modules_to_save is None: self.modules_to_save = {"classifier", "score"} else: self.modules_to_save.update({"classifier", "score"}) for name, _ in self.base_model.named_children(): if any(module_name in name for module_name in self.modules_to_save): self.cls_layer_name = name break # to make sure classifier layer is trainable _set_trainable(self, adapter_name) def forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict peft_config = self.active_peft_config if not isinstance(peft_config, PromptLearningConfig): return self.base_model( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, labels=labels, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) batch_size = input_ids.shape[0] if attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(self.device) attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) if kwargs.get("position_ids", None) is not None: warnings.warn("Position ids are not supported for parameter efficient tuning. 
Ignoring position ids.") kwargs["position_ids"] = None kwargs.update( { "attention_mask": attention_mask, "labels": labels, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, } ) if peft_config.peft_type == PeftType.PREFIX_TUNING: return self._prefix_tuning_forward(input_ids=input_ids, **kwargs) else: if kwargs.get("token_type_ids", None) is not None: kwargs["token_type_ids"] = torch.cat( ( torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.device), kwargs["token_type_ids"], ), dim=1, ).long() if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) prompts = self.get_prompt(batch_size=batch_size) prompts = prompts.to(inputs_embeds.dtype) inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1) return self.base_model(inputs_embeds=inputs_embeds, **kwargs) def _prefix_tuning_forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, ): batch_size = input_ids.shape[0] past_key_values = self.get_prompt(batch_size) fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys()) kwargs.update( { "input_ids": input_ids, "attention_mask": attention_mask, "inputs_embeds": inputs_embeds, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, "past_key_values": past_key_values, } ) if "past_key_values" in fwd_params: return self.base_model(labels=labels, **kwargs) else: transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name) fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys()) if "past_key_values" not in fwd_params: raise ValueError("Model does not support past key values which are required for prefix tuning.") outputs = transformer_backbone_name(**kwargs) pooled_output = outputs[1] if len(outputs) > 1 else outputs[0] if "dropout" in [name for name, _ in list(self.base_model.named_children())]: pooled_output = self.base_model.dropout(pooled_output) logits = self.base_model.get_submodule(self.cls_layer_name)(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.base_model.num_labels == 1: self.config.problem_type = "regression" elif self.base_model.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.base_model.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.base_model.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class PeftModelForCausalLM(PeftModel): """ Peft model for causal language modeling. Args: model ([`~transformers.PreTrainedModel`]): Base transformer model. peft_config ([`PeftConfig`]): Peft config. 
Example: ```py >>> from transformers import AutoModelForCausalLM >>> from peft import PeftModelForCausalLM, get_peft_config >>> config = { ... "peft_type": "PREFIX_TUNING", ... "task_type": "CAUSAL_LM", ... "inference_mode": False, ... "num_virtual_tokens": 20, ... "token_dim": 1280, ... "num_transformer_submodules": 1, ... "num_attention_heads": 20, ... "num_layers": 36, ... "encoder_hidden_size": 1280, ... "prefix_projection": False, ... "postprocess_past_key_value_function": None, ... } >>> peft_config = get_peft_config(config) >>> model = AutoModelForCausalLM.from_pretrained("gpt2-large") >>> peft_model = PeftModelForCausalLM(model, peft_config) >>> peft_model.print_trainable_parameters() trainable params: 1843200 || all params: 775873280 || trainable%: 0.23756456724479544 ``` """ def __init__(self, model, peft_config: PeftConfig, adapter_name="default"): super().__init__(model, peft_config, adapter_name) self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation def forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, ): peft_config = self.active_peft_config if not isinstance(peft_config, PromptLearningConfig): return self.base_model( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, labels=labels, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) batch_size = input_ids.shape[0] if attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(self.device) attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) if kwargs.get("position_ids", None) is not None: warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.") kwargs["position_ids"] = None if kwargs.get("token_type_ids", None) is not None: warnings.warn("Token type ids are not supported for parameter efficient tuning. Ignoring token type ids") kwargs["token_type_ids"] = None kwargs.update( { "attention_mask": attention_mask, "labels": labels, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, } ) if peft_config.peft_type == PeftType.PREFIX_TUNING: past_key_values = self.get_prompt(batch_size) return self.base_model(input_ids=input_ids, past_key_values=past_key_values, **kwargs) else: if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) # concat prompt labels if labels is not None: prefix_labels = torch.full((batch_size, peft_config.num_virtual_tokens), -100).to(self.device) kwargs["labels"] = torch.cat((prefix_labels, labels), dim=1) prompts = self.get_prompt(batch_size=batch_size) prompts = prompts.to(inputs_embeds.dtype) inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1) return self.base_model(inputs_embeds=inputs_embeds, **kwargs) def generate(self, **kwargs): peft_config = self.active_peft_config self.base_model.prepare_inputs_for_generation = self.prepare_inputs_for_generation try: if not isinstance(peft_config, PromptLearningConfig): outputs = self.base_model.generate(**kwargs) else: if "input_ids" not in kwargs: raise ValueError("input_ids must be provided for Peft model generation") # For gpt2 models, we construct postion_ids on the fly by using attention mask, and position ids need to match input_shape. 
# for prefix tuning, input shape is determined using `input_ids`. Thus we should not expand 'attention_mask' here # for prompt tuning input_ids is not passed but a concatenated input_embeds is passed. Thus attention_mask needs to be of same size of num_virtual_tokens + input_ids if kwargs.get("attention_mask", None) is not None and peft_config.peft_type in [ PeftType.PROMPT_TUNING, PeftType.P_TUNING, ]: # concat prompt attention mask prefix_attention_mask = torch.ones( kwargs["input_ids"].shape[0], peft_config.num_virtual_tokens ).to(kwargs["input_ids"].device) kwargs["attention_mask"] = torch.cat((prefix_attention_mask, kwargs["attention_mask"]), dim=1) if kwargs.get("position_ids", None) is not None: warnings.warn( "Position ids are not supported for parameter efficient tuning. Ignoring position ids." ) kwargs["position_ids"] = None if kwargs.get("token_type_ids", None) is not None: warnings.warn( "Token type ids are not supported for parameter efficient tuning. Ignoring token type ids" ) kwargs["token_type_ids"] = None outputs = self.base_model.generate(**kwargs) except: self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation raise else: self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation return outputs def prepare_inputs_for_generation(self, *args, **kwargs): peft_config = self.active_peft_config model_kwargs = self.base_model_prepare_inputs_for_generation(*args, **kwargs) if isinstance(peft_config, PromptLearningConfig): if peft_config.peft_type == PeftType.PREFIX_TUNING: prefix_attention_mask = torch.ones( model_kwargs["input_ids"].shape[0], peft_config.num_virtual_tokens ).to(model_kwargs["input_ids"].device) model_kwargs["attention_mask"] = torch.cat( (prefix_attention_mask, model_kwargs["attention_mask"]), dim=1 ) if model_kwargs["past_key_values"] is None and peft_config.peft_type == PeftType.PREFIX_TUNING: past_key_values = self.get_prompt(batch_size=model_kwargs["input_ids"].shape[0]) if self.base_model_torch_dtype is not None: # handle the case for Bloom where it outputs tuple of tuples if isinstance(past_key_values[0], tuple): past_key_values = tuple( tuple( past_key_value.to(self.base_model_torch_dtype) for past_key_value in past_key_value_tuple ) for past_key_value_tuple in past_key_values ) else: past_key_values = tuple( past_key_value.to(self.base_model_torch_dtype) for past_key_value in past_key_values ) model_kwargs["past_key_values"] = past_key_values else: if model_kwargs["past_key_values"] is None: inputs_embeds = self.word_embeddings(model_kwargs["input_ids"]) prompts = self.get_prompt(batch_size=model_kwargs["input_ids"].shape[0]) prompts = prompts.to(inputs_embeds.dtype) model_kwargs["inputs_embeds"] = torch.cat((prompts, inputs_embeds), dim=1) model_kwargs["input_ids"] = None return model_kwargs class PeftModelForSeq2SeqLM(PeftModel): """ Peft model for sequence-to-sequence language modeling. Args: model ([`~transformers.PreTrainedModel`]): Base transformer model. peft_config ([`PeftConfig`]): Peft config. Example: ```py >>> from transformers import AutoModelForSeq2SeqLM >>> from peft import PeftModelForSeq2SeqLM, get_peft_config >>> config = { ... "peft_type": "LORA", ... "task_type": "SEQ_2_SEQ_LM", ... "inference_mode": False, ... "r": 8, ... "target_modules": ["q", "v"], ... "lora_alpha": 32, ... "lora_dropout": 0.1, ... "merge_weights": False, ... "fan_in_fan_out": False, ... "enable_lora": None, ... "bias": "none", ... 
} >>> peft_config = get_peft_config(config) >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> peft_model = PeftModelForSeq2SeqLM(model, peft_config) >>> peft_model.print_trainable_parameters() trainable params: 884736 || all params: 223843584 || trainable%: 0.3952474242013566 ``` """ def __init__(self, model, peft_config: PeftConfig, adapter_name="default"): super().__init__(model, peft_config, adapter_name) self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation self.base_model_prepare_encoder_decoder_kwargs_for_generation = ( self.base_model._prepare_encoder_decoder_kwargs_for_generation ) def forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, decoder_input_ids=None, decoder_attention_mask=None, decoder_inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, ): peft_config = self.active_peft_config if not isinstance(peft_config, PromptLearningConfig): return self.base_model( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_inputs_embeds=decoder_inputs_embeds, labels=labels, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) batch_size = input_ids.shape[0] if decoder_attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(self.device) decoder_attention_mask = torch.cat((prefix_attention_mask, decoder_attention_mask), dim=1) if kwargs.get("position_ids", None) is not None: warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.") kwargs["position_ids"] = None if kwargs.get("token_type_ids", None) is not None: warnings.warn("Token type ids are not supported for parameter efficient tuning. Ignoring token type ids") kwargs["token_type_ids"] = None kwargs.update( { "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "labels": labels, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, } ) if peft_config.peft_type == PeftType.PREFIX_TUNING: past_key_values = self.get_prompt(batch_size) return self.base_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, past_key_values=past_key_values, **kwargs ) else: if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) if decoder_inputs_embeds is None and decoder_input_ids is None:
decoder_input_ids = shift_tokens_right(
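The record's `next_line` above calls `shift_tokens_right`; as a reader aid, here is a minimal sketch of what such a helper conventionally does for seq2seq decoder inputs (illustrative only; the real helper ships with `transformers`, and the exact signature used here is an assumption):

```py
import torch

def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int,
                       decoder_start_token_id: int) -> torch.Tensor:
    # Build decoder inputs by shifting the labels one position to the right.
    shifted = input_ids.new_zeros(input_ids.shape)
    shifted[:, 1:] = input_ids[:, :-1].clone()
    shifted[:, 0] = decoder_start_token_id                # decoder starts from BOS
    shifted.masked_fill_(shifted == -100, pad_token_id)   # replace label padding
    return shifted
```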
17
2023-10-19 10:55:50+00:00
24k
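Before the next record, a self-contained sketch of the masking pattern the PEFT record above repeats in several places: the attention mask is extended with `num_virtual_tokens` ones before the prompt embeddings are prepended (plain torch; values illustrative):

```py
import torch

num_virtual_tokens = 20                 # matches the config example above
attention_mask = torch.ones(2, 7)       # batch of 2, sequence length 7
prefix_mask = torch.ones(attention_mask.shape[0], num_virtual_tokens)
extended_mask = torch.cat((prefix_mask, attention_mask), dim=1)
print(extended_mask.shape)              # torch.Size([2, 27])
```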
YuroFR/freqtrade-modded-crypto-trading-bot
freqtrade/exchange/exchange.py
[ { "identifier": "DEFAULT_AMOUNT_RESERVE_PERCENT", "path": "freqtrade/constants.py", "snippet": "DOCS_LINK = \"https://www.freqtrade.io/en/stable\"\nDEFAULT_CONFIG = 'config.json'\nPROCESS_THROTTLE_SECS = 5 # sec\nHYPEROPT_EPOCH = 100 # epochs\nRETRY_TIMEOUT = 30 # sec\nTIMEOUT_UNITS = ['minutes', 'se...
import asyncio import inspect import logging import signal import ccxt import ccxt.async_support as ccxt_async from copy import deepcopy from datetime import datetime, timedelta, timezone from math import floor from threading import Lock from typing import Any, Coroutine, Dict, List, Literal, Optional, Tuple, Union from cachetools import TTLCache from ccxt import TICK_SIZE from dateutil import parser from pandas import DataFrame, concat from freqtrade.constants import (DEFAULT_AMOUNT_RESERVE_PERCENT, NON_OPEN_EXCHANGE_STATES, BidAsk, BuySell, Config, EntryExit, ExchangeConfig, ListPairsWithTimeframes, MakerTaker, OBLiteral, PairWithTimeframe) from freqtrade.data.converter import clean_ohlcv_dataframe, ohlcv_to_dataframe, trades_dict_to_list from freqtrade.enums import OPTIMIZE_MODES, CandleType, MarginMode, PriceType, TradingMode from freqtrade.exceptions import (DDosProtection, ExchangeError, InsufficientFundsError, InvalidOrderException, OperationalException, PricingError, RetryableOrderError, TemporaryError) from freqtrade.exchange.common import (API_FETCH_ORDER_RETRY_COUNT, remove_exchange_credentials, retrier, retrier_async) from freqtrade.exchange.exchange_utils import (ROUND, ROUND_DOWN, ROUND_UP, CcxtModuleType, amount_to_contract_precision, amount_to_contracts, amount_to_precision, contracts_to_amount, date_minus_candles, is_exchange_known_ccxt, market_is_active, price_to_precision, timeframe_to_minutes, timeframe_to_msecs, timeframe_to_next_date, timeframe_to_prev_date, timeframe_to_seconds) from freqtrade.exchange.types import OHLCVResponse, OrderBook, Ticker, Tickers from freqtrade.misc import (chunks, deep_merge_dicts, file_dump_json, file_load_json, safe_value_fallback2) from freqtrade.plugins.pairlist.pairlist_helpers import expand_pairlist from freqtrade.util import dt_from_ts, dt_now from freqtrade.util.datetime_helpers import dt_humanize, dt_ts from freqtrade.persistence import Order
15,462
timeframe, candle_type, since_ms) move_to = one_call * self.required_candle_call_count now = timeframe_to_next_date(timeframe) since_ms = int((now - timedelta(seconds=move_to // 1000)).timestamp() * 1000) if since_ms: return self._async_get_historic_ohlcv( pair, timeframe, since_ms=since_ms, raise_=True, candle_type=candle_type) else: # One call ... "regular" refresh return self._async_get_candle_history( pair, timeframe, since_ms=since_ms, candle_type=candle_type) def _build_ohlcv_dl_jobs( self, pair_list: ListPairsWithTimeframes, since_ms: Optional[int], cache: bool) -> Tuple[List[Coroutine], List[Tuple[str, str, CandleType]]]: """ Build Coroutines to execute as part of refresh_latest_ohlcv """ input_coroutines: List[Coroutine[Any, Any, OHLCVResponse]] = [] cached_pairs = [] for pair, timeframe, candle_type in set(pair_list): if (timeframe not in self.timeframes and candle_type in (CandleType.SPOT, CandleType.FUTURES)): logger.warning( f"Cannot download ({pair}, {timeframe}) combination as this timeframe is " f"not available on {self.name}. Available timeframes are " f"{', '.join(self.timeframes)}.") continue if ((pair, timeframe, candle_type) not in self._klines or not cache or self._now_is_time_to_refresh(pair, timeframe, candle_type)): input_coroutines.append( self._build_coroutine(pair, timeframe, candle_type, since_ms, cache)) else: logger.debug( f"Using cached candle (OHLCV) data for {pair}, {timeframe}, {candle_type} ..." ) cached_pairs.append((pair, timeframe, candle_type)) return input_coroutines, cached_pairs def _process_ohlcv_df(self, pair: str, timeframe: str, c_type: CandleType, ticks: List[List], cache: bool, drop_incomplete: bool) -> DataFrame: # keeping last candle time as last refreshed time of the pair if ticks and cache: idx = -2 if drop_incomplete and len(ticks) > 1 else -1 self._pairs_last_refresh_time[(pair, timeframe, c_type)] = ticks[idx][0] // 1000 # keeping parsed dataframe in cache ohlcv_df = ohlcv_to_dataframe(ticks, timeframe, pair=pair, fill_missing=True, drop_incomplete=drop_incomplete) if cache: if (pair, timeframe, c_type) in self._klines: old = self._klines[(pair, timeframe, c_type)] # Reassign so we return the updated, combined df ohlcv_df = clean_ohlcv_dataframe(concat([old, ohlcv_df], axis=0), timeframe, pair, fill_missing=True, drop_incomplete=False) candle_limit = self.ohlcv_candle_limit(timeframe, self._config['candle_type_def']) # Age out old candles ohlcv_df = ohlcv_df.tail(candle_limit + self._startup_candle_count) ohlcv_df = ohlcv_df.reset_index(drop=True) self._klines[(pair, timeframe, c_type)] = ohlcv_df else: self._klines[(pair, timeframe, c_type)] = ohlcv_df return ohlcv_df def refresh_latest_ohlcv(self, pair_list: ListPairsWithTimeframes, *, since_ms: Optional[int] = None, cache: bool = True, drop_incomplete: Optional[bool] = None ) -> Dict[PairWithTimeframe, DataFrame]: """ Refresh in-memory OHLCV asynchronously and set `_klines` with the result Loops asynchronously over pair_list and downloads all pairs async (semi-parallel). Only used in the dataprovider.refresh() method. :param pair_list: List of (pair, timeframe, candle_type) tuples to refresh :param since_ms: time since when to download, in milliseconds :param cache: Assign result to _klines. Useful for one-off downloads like for pairlists :param drop_incomplete: Control candle dropping. 
Specifying None defaults to _ohlcv_partial_candle :return: Dict of [{(pair, timeframe): Dataframe}] """ logger.debug("Refreshing candle (OHLCV) data for %d pairs", len(pair_list)) # Gather coroutines to run input_coroutines, cached_pairs = self._build_ohlcv_dl_jobs(pair_list, since_ms, cache) results_df = {} # Chunk requests into batches of 100 to avoid overwhelming ccxt throttling for input_coro in chunks(input_coroutines, 100): async def gather_stuff(): return await asyncio.gather(*input_coro, return_exceptions=True) with self._loop_lock: results = self.loop.run_until_complete(gather_stuff()) for res in results: if isinstance(res, Exception): logger.warning(f"Async code raised an exception: {repr(res)}") continue # Deconstruct tuple (has 5 elements) pair, timeframe, c_type, ticks, drop_hint = res drop_incomplete_ = drop_hint if drop_incomplete is None else drop_incomplete ohlcv_df = self._process_ohlcv_df( pair, timeframe, c_type, ticks, cache, drop_incomplete_) results_df[(pair, timeframe, c_type)] = ohlcv_df # Return cached klines for pair, timeframe, c_type in cached_pairs: results_df[(pair, timeframe, c_type)] = self.klines( (pair, timeframe, c_type), copy=False ) return results_df def _now_is_time_to_refresh(self, pair: str, timeframe: str, candle_type: CandleType) -> bool: # Timeframe in seconds
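The `refresh_latest_ohlcv` crop above gathers download coroutines in chunks of 100 with `return_exceptions=True`, so one failing request cannot abort the whole batch. A standalone sketch of that idiom (the stand-in coroutine is illustrative; `chunks` mirrors `freqtrade.misc.chunks`):

```py
import asyncio

def chunks(lst, n):
    # Yield successive n-sized slices of lst.
    for i in range(0, len(lst), n):
        yield lst[i:i + n]

async def fake_download(i):
    await asyncio.sleep(0)              # stand-in for an exchange request
    return i

async def main():
    coros = [fake_download(i) for i in range(250)]
    results = []
    for batch in chunks(coros, 100):    # 3 batches: 100 + 100 + 50
        results.extend(await asyncio.gather(*batch, return_exceptions=True))
    return results

print(len(asyncio.run(main())))         # 250
```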
# pragma pylint: disable=W0603 """ Cryptocurrency Exchanges support """ logger = logging.getLogger(__name__) class Exchange: # Parameters to add directly to buy/sell calls (like agreeing to trading agreement) _params: Dict = {} # Additional parameters - added to the ccxt object _ccxt_params: Dict = {} # Dict to specify which options each exchange implements # This defines defaults, which can be selectively overridden by subclasses using _ft_has # or by specifying them in the configuration. _ft_has_default: Dict = { "stoploss_on_exchange": False, "stop_price_param": "stopLossPrice", # Used for stoploss_on_exchange request "stop_price_prop": "stopLossPrice", # Used for stoploss_on_exchange response parsing "order_time_in_force": ["GTC"], "ohlcv_params": {}, "ohlcv_candle_limit": 500, "ohlcv_has_history": True, # Some exchanges (Kraken) don't provide history via ohlcv "ohlcv_partial_candle": True, "ohlcv_require_since": False, # Check https://github.com/ccxt/ccxt/issues/10767 for removal of ohlcv_volume_currency "ohlcv_volume_currency": "base", # "base" or "quote" "tickers_have_quoteVolume": True, "tickers_have_bid_ask": True, # bid / ask empty for fetch_tickers "tickers_have_price": True, "trades_pagination": "time", # Possible are "time" or "id" "trades_pagination_arg": "since", "l2_limit_range": None, "l2_limit_range_required": True, # Allow Empty L2 limit (kucoin) "mark_ohlcv_price": "mark", "mark_ohlcv_timeframe": "8h", "ccxt_futures_name": "swap", "needs_trading_fees": False, # use fetch_trading_fees to cache fees "order_props_in_contracts": ['amount', 'filled', 'remaining'], # Override createMarketBuyOrderRequiresPrice where ccxt has it wrong "marketOrderRequiresPrice": False, } _ft_has: Dict = {} _ft_has_futures: Dict = {} _supported_trading_mode_margin_pairs: List[Tuple[TradingMode, MarginMode]] = [ # TradingMode.SPOT always supported and not required in this list ] def __init__(self, config: Config, *, exchange_config: Optional[ExchangeConfig] = None, validate: bool = True, load_leverage_tiers: bool = False) -> None: """ Initializes this module with the given config, it does basic validation whether the specified exchange and pairs are valid. :return: None """ self._api: ccxt.Exchange self._api_async: ccxt_async.Exchange = None self._markets: Dict = {} self._trading_fees: Dict[str, Any] = {} self._leverage_tiers: Dict[str, List[Dict]] = {} # Lock event loop. This is necessary to avoid race-conditions when using force* commands # Due to funding fee fetching. self._loop_lock = Lock() self.loop = self._init_async_loop() self._config: Config = {} self._config.update(config) # Holds last candle refreshed time of each pair self._pairs_last_refresh_time: Dict[PairWithTimeframe, int] = {} # Timestamp of last markets refresh self._last_markets_refresh: int = 0 # Cache for 10 minutes ... self._cache_lock = Lock() self._fetch_tickers_cache: TTLCache = TTLCache(maxsize=2, ttl=60 * 10) # Cache values for 1800 to avoid frequent polling of the exchange for prices # Caching only applies to RPC methods, so prices for open trades are still # refreshed once every iteration. 
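# (Editor's illustration, not part of the original file: a TTLCache acts like a bounded dict whose entries expire, e.g. cache = TTLCache(maxsize=100, ttl=1800); cache['BTC/USDT'] = 26000.0 stays readable for up to 30 minutes and is then evicted.)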
self._exit_rate_cache: TTLCache = TTLCache(maxsize=100, ttl=1800) self._entry_rate_cache: TTLCache = TTLCache(maxsize=100, ttl=1800) # Holds candles self._klines: Dict[PairWithTimeframe, DataFrame] = {} # Holds all open orders for dry_run self._dry_run_open_orders: Dict[str, Any] = {} if config['dry_run']: logger.info('Instance is running with dry_run enabled') logger.info(f"Using CCXT {ccxt.__version__}") exchange_conf: Dict[str, Any] = exchange_config if exchange_config else config['exchange'] remove_exchange_credentials(exchange_conf, config.get('dry_run', False)) self.log_responses = exchange_conf.get('log_responses', False) # Leverage properties self.trading_mode: TradingMode = config.get('trading_mode', TradingMode.SPOT) self.margin_mode: MarginMode = ( MarginMode(config.get('margin_mode')) if config.get('margin_mode') else MarginMode.NONE ) self.liquidation_buffer = config.get('liquidation_buffer', 0.05) # Deep merge ft_has with default ft_has options self._ft_has = deep_merge_dicts(self._ft_has, deepcopy(self._ft_has_default)) if self.trading_mode == TradingMode.FUTURES: self._ft_has = deep_merge_dicts(self._ft_has_futures, self._ft_has) if exchange_conf.get('_ft_has_params'): self._ft_has = deep_merge_dicts(exchange_conf.get('_ft_has_params'), self._ft_has) logger.info("Overriding exchange._ft_has with config params, result: %s", self._ft_has) # Assign this directly for easy access self._ohlcv_partial_candle = self._ft_has['ohlcv_partial_candle'] self._trades_pagination = self._ft_has['trades_pagination'] self._trades_pagination_arg = self._ft_has['trades_pagination_arg'] # Initialize ccxt objects ccxt_config = self._ccxt_config ccxt_config = deep_merge_dicts(exchange_conf.get('ccxt_config', {}), ccxt_config) ccxt_config = deep_merge_dicts(exchange_conf.get('ccxt_sync_config', {}), ccxt_config) self._api = self._init_ccxt(exchange_conf, ccxt_kwargs=ccxt_config) ccxt_async_config = self._ccxt_config ccxt_async_config = deep_merge_dicts(exchange_conf.get('ccxt_config', {}), ccxt_async_config) ccxt_async_config = deep_merge_dicts(exchange_conf.get('ccxt_async_config', {}), ccxt_async_config) self._api_async = self._init_ccxt( exchange_conf, ccxt_async, ccxt_kwargs=ccxt_async_config) logger.info(f'Using Exchange "{self.name}"') self.required_candle_call_count = 1 if validate: # Initial markets load self._load_markets() self.validate_config(config) self._startup_candle_count: int = config.get('startup_candle_count', 0) self.required_candle_call_count = self.validate_required_startup_candles( self._startup_candle_count, config.get('timeframe', '')) # Converts the interval provided in minutes in config to milliseconds self.markets_refresh_interval: int = exchange_conf.get( "markets_refresh_interval", 60) * 60 * 1000 if self.trading_mode != TradingMode.SPOT and load_leverage_tiers: self.fill_leverage_tiers() self.additional_exchange_init() def __del__(self): """ Destructor - clean up async stuff """ self.close() def close(self): logger.debug("Exchange object destroyed, closing async loop") if (self._api_async and inspect.iscoroutinefunction(self._api_async.close) and self._api_async.session): logger.debug("Closing async ccxt session.") self.loop.run_until_complete(self._api_async.close()) if self.loop and not self.loop.is_closed(): self.loop.close() def _init_async_loop(self) -> asyncio.AbstractEventLoop: loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) return loop def validate_config(self, config): # Check if timeframe is available 
self.validate_timeframes(config.get('timeframe')) # Check if all pairs are available self.validate_stakecurrency(config['stake_currency']) if not config['exchange'].get('skip_pair_validation'): self.validate_pairs(config['exchange']['pair_whitelist']) self.validate_ordertypes(config.get('order_types', {})) self.validate_order_time_in_force(config.get('order_time_in_force', {})) self.validate_trading_mode_and_margin_mode(self.trading_mode, self.margin_mode) self.validate_pricing(config['exit_pricing']) self.validate_pricing(config['entry_pricing']) def _init_ccxt(self, exchange_config: Dict[str, Any], ccxt_module: CcxtModuleType = ccxt, ccxt_kwargs: Dict = {}) -> ccxt.Exchange: """ Initialize ccxt with given config and return valid ccxt instance. """ # Find matching class for the given exchange name name = exchange_config['name'] if not is_exchange_known_ccxt(name, ccxt_module): raise OperationalException(f'Exchange {name} is not supported by ccxt') ex_config = { 'apiKey': exchange_config.get('key'), 'secret': exchange_config.get('secret'), 'password': exchange_config.get('password'), 'uid': exchange_config.get('uid', ''), } if ccxt_kwargs: logger.info('Applying additional ccxt config: %s', ccxt_kwargs) if self._ccxt_params: # Inject static options after the above output to not confuse users. ccxt_kwargs = deep_merge_dicts(self._ccxt_params, ccxt_kwargs) if ccxt_kwargs: ex_config.update(ccxt_kwargs) try: api = getattr(ccxt_module, name.lower())(ex_config) except (KeyError, AttributeError) as e: raise OperationalException(f'Exchange {name} is not supported') from e except ccxt.BaseError as e: raise OperationalException(f"Initialization of ccxt failed. Reason: {e}") from e return api @property def _ccxt_config(self) -> Dict: # Parameters to add directly to ccxt sync/async initialization. if self.trading_mode == TradingMode.MARGIN: return { "options": { "defaultType": "margin" } } elif self.trading_mode == TradingMode.FUTURES: return { "options": { "defaultType": self._ft_has["ccxt_futures_name"] } } else: return {} @property def name(self) -> str: """exchange Name (from ccxt)""" return self._api.name @property def id(self) -> str: """exchange ccxt id""" return self._api.id @property def timeframes(self) -> List[str]: return list((self._api.timeframes or {}).keys()) @property def markets(self) -> Dict[str, Any]: """exchange ccxt markets""" if not self._markets: logger.info("Markets were not loaded. Loading them now..") self._load_markets() return self._markets @property def precisionMode(self) -> int: """exchange ccxt precisionMode""" return self._api.precisionMode def additional_exchange_init(self) -> None: """ Additional exchange initialization logic. .api will be available at this point. Must be overridden in child methods if required. """ pass def _log_exchange_response(self, endpoint, response) -> None: """ Log exchange responses """ if self.log_responses: logger.info(f"API {endpoint}: {response}") def ohlcv_candle_limit( self, timeframe: str, candle_type: CandleType, since_ms: Optional[int] = None) -> int: """ Exchange ohlcv candle limit Uses ohlcv_candle_limit_per_timeframe if the exchange has different limits per timeframe (e.g. 
bittrex), otherwise falls back to ohlcv_candle_limit :param timeframe: Timeframe to check :param candle_type: Candle-type :param since_ms: Starting timestamp :return: Candle limit as integer """ return int(self._ft_has.get('ohlcv_candle_limit_per_timeframe', {}).get( timeframe, self._ft_has.get('ohlcv_candle_limit'))) def get_markets(self, base_currencies: List[str] = [], quote_currencies: List[str] = [], spot_only: bool = False, margin_only: bool = False, futures_only: bool = False, tradable_only: bool = True, active_only: bool = False) -> Dict[str, Any]: """ Return exchange ccxt markets, filtered out by base currency and quote currency if this was requested in parameters. """ markets = self.markets if not markets: raise OperationalException("Markets were not loaded.") if base_currencies: markets = {k: v for k, v in markets.items() if v['base'] in base_currencies} if quote_currencies: markets = {k: v for k, v in markets.items() if v['quote'] in quote_currencies} if tradable_only: markets = {k: v for k, v in markets.items() if self.market_is_tradable(v)} if spot_only: markets = {k: v for k, v in markets.items() if self.market_is_spot(v)} if margin_only: markets = {k: v for k, v in markets.items() if self.market_is_margin(v)} if futures_only: markets = {k: v for k, v in markets.items() if self.market_is_future(v)} if active_only: markets = {k: v for k, v in markets.items() if market_is_active(v)} return markets def get_quote_currencies(self) -> List[str]: """ Return a list of supported quote currencies """ markets = self.markets return sorted(set([x['quote'] for _, x in markets.items()])) def get_pair_quote_currency(self, pair: str) -> str: """ Return a pair's quote currency (base/quote:settlement) """ return self.markets.get(pair, {}).get('quote', '') def get_pair_base_currency(self, pair: str) -> str: """ Return a pair's base currency (base/quote:settlement) """ return self.markets.get(pair, {}).get('base', '') def market_is_future(self, market: Dict[str, Any]) -> bool: return ( market.get(self._ft_has["ccxt_futures_name"], False) is True and market.get('linear', False) is True ) def market_is_spot(self, market: Dict[str, Any]) -> bool: return market.get('spot', False) is True def market_is_margin(self, market: Dict[str, Any]) -> bool: return market.get('margin', False) is True def market_is_tradable(self, market: Dict[str, Any]) -> bool: """ Check if the market symbol is tradable by Freqtrade. 
Ensures that the configured trading mode (spot/margin/futures) aligns with the market type. """ return ( market.get('quote', None) is not None and market.get('base', None) is not None and (self.precisionMode != TICK_SIZE # Too low precision will falsify calculations or market.get('precision', {}).get('price') > 1e-11) and ((self.trading_mode == TradingMode.SPOT and self.market_is_spot(market)) or (self.trading_mode == TradingMode.MARGIN and self.market_is_margin(market)) or (self.trading_mode == TradingMode.FUTURES and self.market_is_future(market))) ) def klines(self, pair_interval: PairWithTimeframe, copy: bool = True) -> DataFrame: if pair_interval in self._klines: return self._klines[pair_interval].copy() if copy else self._klines[pair_interval] else: return DataFrame() def get_contract_size(self, pair: str) -> Optional[float]: if self.trading_mode == TradingMode.FUTURES: market = self.markets.get(pair, {}) contract_size: float = 1.0 if not market: return None if market.get('contractSize') is not None: # ccxt has contractSize in markets as string contract_size = float(market['contractSize']) return contract_size else: return 1 def _trades_contracts_to_amount(self, trades: List) -> List: if len(trades) > 0 and 'symbol' in trades[0]: contract_size = self.get_contract_size(trades[0]['symbol']) if contract_size != 1: for trade in trades: trade['amount'] = trade['amount'] * contract_size return trades def _order_contracts_to_amount(self, order: Dict) -> Dict: if 'symbol' in order and order['symbol'] is not None: contract_size = self.get_contract_size(order['symbol']) if contract_size != 1: for prop in self._ft_has.get('order_props_in_contracts', []): if prop in order and order[prop] is not None: order[prop] = order[prop] * contract_size return order def _amount_to_contracts(self, pair: str, amount: float) -> float: contract_size = self.get_contract_size(pair) return amount_to_contracts(amount, contract_size) def _contracts_to_amount(self, pair: str, num_contracts: float) -> float: contract_size = self.get_contract_size(pair) return contracts_to_amount(num_contracts, contract_size) def amount_to_contract_precision(self, pair: str, amount: float) -> float: """ Helper wrapper around amount_to_contract_precision """ contract_size = self.get_contract_size(pair) return amount_to_contract_precision(amount, self.get_precision_amount(pair), self.precisionMode, contract_size) def _load_async_markets(self, reload: bool = False) -> None: try: if self._api_async: self.loop.run_until_complete( self._api_async.load_markets(reload=reload, params={})) except (asyncio.TimeoutError, ccxt.BaseError) as e: logger.warning('Could not load async markets. 
Reason: %s', e) return def _load_markets(self) -> None: """ Initialize markets both sync and async """ try: self._markets = self._api.load_markets(params={}) self._load_async_markets() self._last_markets_refresh = dt_ts() if self._ft_has['needs_trading_fees']: self._trading_fees = self.fetch_trading_fees() except ccxt.BaseError: logger.exception('Unable to initialize markets.') def reload_markets(self) -> None: """Reload markets both sync and async if refresh interval has passed """ # Check whether markets have to be reloaded if (self._last_markets_refresh > 0) and ( self._last_markets_refresh + self.markets_refresh_interval > dt_ts()): return None logger.debug("Performing scheduled market reload..") try: self._markets = self._api.load_markets(reload=True, params={}) # Also reload async markets to avoid issues with newly listed pairs self._load_async_markets(reload=True) self._last_markets_refresh = dt_ts() self.fill_leverage_tiers() except ccxt.BaseError: logger.exception("Could not reload markets.") def validate_stakecurrency(self, stake_currency: str) -> None: """ Checks stake-currency against available currencies on the exchange. Only runs on startup. If markets have not been loaded, there's been a problem with the connection to the exchange. :param stake_currency: Stake-currency to validate :raise: OperationalException if stake-currency is not available. """ if not self._markets: raise OperationalException( 'Could not load markets, therefore cannot start. ' 'Please investigate the above error for more details.' ) quote_currencies = self.get_quote_currencies() if stake_currency not in quote_currencies: raise OperationalException( f"{stake_currency} is not available as stake on {self.name}. " f"Available currencies are: {', '.join(quote_currencies)}") def validate_pairs(self, pairs: List[str]) -> None: """ Checks if all given pairs are tradable on the current exchange. :param pairs: list of pairs :raise: OperationalException if one pair is not available :return: None """ if not self.markets: logger.warning('Unable to validate pairs (assuming they are correct).') return extended_pairs = expand_pairlist(pairs, list(self.markets), keep_invalid=True) invalid_pairs = [] for pair in extended_pairs: # Note: ccxt has BaseCurrency/QuoteCurrency format for pairs if self.markets and pair not in self.markets: raise OperationalException( f'Pair {pair} is not available on {self.name} {self.trading_mode.value}. ' f'Please remove {pair} from your whitelist.') # From ccxt Documentation: # markets.info: An associative array of non-common market properties, # including fees, rates, limits and other general market information. # The internal info array is different for each particular market, # its contents depend on the exchange. # It can also be a string or similar ... so we need to verify that first. elif (isinstance(self.markets[pair].get('info'), dict) and self.markets[pair].get('info', {}).get('prohibitedIn', False)): # Warn users about restricted pairs in whitelist. # We cannot determine reliably if users are affected. logger.warning(f"Pair {pair} is restricted for some users on this exchange. " f"Please check if you are impacted by this restriction " f"on the exchange and remove {pair} from your whitelist if necessary.") if (self._config['stake_currency'] and self.get_pair_quote_currency(pair) != self._config['stake_currency']): invalid_pairs.append(pair) if invalid_pairs: raise OperationalException( f"Stake-currency '{self._config['stake_currency']}' not compatible with " f"pair-whitelist. 
Please remove the following pairs: {invalid_pairs}") def get_valid_pair_combination(self, curr_1: str, curr_2: str) -> str: """ Get valid pair combination of curr_1 and curr_2 by trying both combinations. """ for pair in [f"{curr_1}/{curr_2}", f"{curr_2}/{curr_1}"]: if pair in self.markets and self.markets[pair].get('active'): return pair raise ValueError(f"Could not combine {curr_1} and {curr_2} to get a valid pair.") def validate_timeframes(self, timeframe: Optional[str]) -> None: """ Check if timeframe from config is a supported timeframe on the exchange """ if not hasattr(self._api, "timeframes") or self._api.timeframes is None: # If timeframes attribute is missing (or is None), the exchange probably # has no fetchOHLCV method. # Therefore we also show that. raise OperationalException( f"The ccxt library does not provide the list of timeframes " f"for the exchange {self.name} and this exchange " f"is therefore not supported. ccxt fetchOHLCV: {self.exchange_has('fetchOHLCV')}") if timeframe and (timeframe not in self.timeframes): raise OperationalException( f"Invalid timeframe '{timeframe}'. This exchange supports: {self.timeframes}") if timeframe and timeframe_to_minutes(timeframe) < 1: raise OperationalException("Timeframes < 1m are currently not supported by Freqtrade.") def validate_ordertypes(self, order_types: Dict) -> None: """ Checks if order-types configured in strategy/config are supported """ if any(v == 'market' for k, v in order_types.items()): if not self.exchange_has('createMarketOrder'): raise OperationalException( f'Exchange {self.name} does not support market orders.') self.validate_stop_ordertypes(order_types) def validate_stop_ordertypes(self, order_types: Dict) -> None: """ Validate stoploss order types """ if (order_types.get("stoploss_on_exchange") and not self._ft_has.get("stoploss_on_exchange", False)): raise OperationalException( f'On exchange stoploss is not supported for {self.name}.' ) if self.trading_mode == TradingMode.FUTURES: price_mapping = self._ft_has.get('stop_price_type_value_mapping', {}).keys() if ( order_types.get("stoploss_on_exchange", False) is True and 'stoploss_price_type' in order_types and order_types['stoploss_price_type'] not in price_mapping ): raise OperationalException( f'On exchange stoploss price type is not supported for {self.name}.' ) def validate_pricing(self, pricing: Dict) -> None: if pricing.get('use_order_book', False) and not self.exchange_has('fetchL2OrderBook'): raise OperationalException(f'Orderbook not available for {self.name}.') if (not pricing.get('use_order_book', False) and ( not self.exchange_has('fetchTicker') or not self._ft_has['tickers_have_price'])): raise OperationalException(f'Ticker pricing not available for {self.name}.') def validate_order_time_in_force(self, order_time_in_force: Dict) -> None: """ Checks if order time in force configured in strategy/config are supported """ if any(v.upper() not in self._ft_has["order_time_in_force"] for k, v in order_time_in_force.items()): raise OperationalException( f'Time in force policies are not supported for {self.name} yet.') def validate_required_startup_candles(self, startup_candles: int, timeframe: str) -> int: """ Checks if required startup_candles is more than ohlcv_candle_limit(). Requires a grace-period of 5 candles - so a startup-period up to 494 is allowed by default. 
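Worked example (illustrative): with a candle limit of 500, startup_candles=600 gives candle_count=601 and therefore ceil(601 / 500) = 2 calls per pair.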
""" candle_limit = self.ohlcv_candle_limit( timeframe, self._config['candle_type_def'], int(date_minus_candles(timeframe, startup_candles).timestamp() * 1000) if timeframe else None) # Require one more candle - to account for the still open candle. candle_count = startup_candles + 1 # Allow 5 calls to the exchange per pair required_candle_call_count = int( (candle_count / candle_limit) + (0 if candle_count % candle_limit == 0 else 1)) if self._ft_has['ohlcv_has_history']: if required_candle_call_count > 5: # Only allow 5 calls per pair to somewhat limit the impact raise OperationalException( f"This strategy requires {startup_candles} candles to start, " "which is more than 5x " f"the amount of candles {self.name} provides for {timeframe}.") elif required_candle_call_count > 1: raise OperationalException( f"This strategy requires {startup_candles} candles to start, which is more than " f"the amount of candles {self.name} provides for {timeframe}.") if required_candle_call_count > 1: logger.warning(f"Using {required_candle_call_count} calls to get OHLCV. " f"This can result in slower operations for the bot. Please check " f"if you really need {startup_candles} candles for your strategy") return required_candle_call_count def validate_trading_mode_and_margin_mode( self, trading_mode: TradingMode, margin_mode: Optional[MarginMode] # Only None when trading_mode = TradingMode.SPOT ): """ Checks if freqtrade can perform trades using the configured trading mode(Margin, Futures) and MarginMode(Cross, Isolated) Throws OperationalException: If the trading_mode/margin_mode type are not supported by freqtrade on this exchange """ if trading_mode != TradingMode.SPOT and ( (trading_mode, margin_mode) not in self._supported_trading_mode_margin_pairs ): mm_value = margin_mode and margin_mode.value raise OperationalException( f"Freqtrade does not support {mm_value} {trading_mode.value} on {self.name}" ) def get_option(self, param: str, default: Optional[Any] = None) -> Any: """ Get parameter value from _ft_has """ return self._ft_has.get(param, default) def exchange_has(self, endpoint: str) -> bool: """ Checks if exchange implements a specific API endpoint. Wrapper around ccxt 'has' attribute :param endpoint: Name of endpoint (e.g. 'fetchOHLCV', 'fetchTickers') :return: bool """ return endpoint in self._api.has and self._api.has[endpoint] def get_precision_amount(self, pair: str) -> Optional[float]: """ Returns the amount precision of the exchange. :param pair: Pair to get precision for :return: precision for amount or None. Must be used in combination with precisionMode """ return self.markets.get(pair, {}).get('precision', {}).get('amount', None) def get_precision_price(self, pair: str) -> Optional[float]: """ Returns the price precision of the exchange. :param pair: Pair to get precision for :return: precision for price or None. Must be used in combination with precisionMode """ return self.markets.get(pair, {}).get('precision', {}).get('price', None) def amount_to_precision(self, pair: str, amount: float) -> float: """ Returns the amount to buy or sell to a precision the Exchange accepts """ return amount_to_precision(amount, self.get_precision_amount(pair), self.precisionMode) def price_to_precision(self, pair: str, price: float, *, rounding_mode: int = ROUND) -> float: """ Returns the price rounded to the precision the Exchange accepts. The default price_rounding_mode in conf is ROUND. For stoploss calculations, must use ROUND_UP for longs, and ROUND_DOWN for shorts. 
""" return price_to_precision(price, self.get_precision_price(pair), self.precisionMode, rounding_mode=rounding_mode) def price_get_one_pip(self, pair: str, price: float) -> float: """ Get's the "1 pip" value for this pair. Used in PriceFilter to calculate the 1pip movements. """ precision = self.markets[pair]['precision']['price'] if self.precisionMode == TICK_SIZE: return precision else: return 1 / pow(10, precision) def get_min_pair_stake_amount( self, pair: str, price: float, stoploss: float, leverage: Optional[float] = 1.0 ) -> Optional[float]: return self._get_stake_amount_limit(pair, price, stoploss, 'min', leverage) def get_max_pair_stake_amount(self, pair: str, price: float, leverage: float = 1.0) -> float: max_stake_amount = self._get_stake_amount_limit(pair, price, 0.0, 'max', leverage) if max_stake_amount is None: # * Should never be executed raise OperationalException(f'{self.name}.get_max_pair_stake_amount should' 'never set max_stake_amount to None') return max_stake_amount def _get_stake_amount_limit( self, pair: str, price: float, stoploss: float, limit: Literal['min', 'max'], leverage: Optional[float] = 1.0 ) -> Optional[float]: isMin = limit == 'min' try: market = self.markets[pair] except KeyError: raise ValueError(f"Can't get market information for symbol {pair}") if isMin: # reserve some percent defined in config (5% default) + stoploss margin_reserve: float = 1.0 + self._config.get('amount_reserve_percent', DEFAULT_AMOUNT_RESERVE_PERCENT) stoploss_reserve = ( margin_reserve / (1 - abs(stoploss)) if abs(stoploss) != 1 else 1.5 ) # it should not be more than 50% stoploss_reserve = max(min(stoploss_reserve, 1.5), 1) else: margin_reserve = 1.0 stoploss_reserve = 1.0 stake_limits = [] limits = market['limits'] if (limits['cost'][limit] is not None): stake_limits.append( self._contracts_to_amount(pair, limits['cost'][limit]) * stoploss_reserve ) if (limits['amount'][limit] is not None): stake_limits.append( self._contracts_to_amount(pair, limits['amount'][limit]) * price * margin_reserve ) if not stake_limits: return None if isMin else float('inf') # The value returned should satisfy both limits: for amount (base currency) and # for cost (quote, stake currency), so max() is used here. # See also #2575 at github. 
return self._get_stake_amount_considering_leverage( max(stake_limits) if isMin else min(stake_limits), leverage or 1.0 ) def _get_stake_amount_considering_leverage(self, stake_amount: float, leverage: float) -> float: """ Takes the minimum stake amount for a pair with no leverage and returns the minimum stake amount when leverage is considered :param stake_amount: The stake amount for a pair before leverage is considered :param leverage: The amount of leverage being used on the current trade """ return stake_amount / leverage # Dry-run methods def create_dry_run_order(self, pair: str, ordertype: str, side: str, amount: float, rate: float, leverage: float, params: Dict = {}, stop_loss: bool = False) -> Dict[str, Any]: now = dt_now() order_id = f'dry_run_{side}_{pair}_{now.timestamp()}' # Rounding here must respect contract sizes _amount = self._contracts_to_amount( pair, self.amount_to_precision(pair, self._amount_to_contracts(pair, amount))) dry_order: Dict[str, Any] = { 'id': order_id, 'symbol': pair, 'price': rate, 'average': rate, 'amount': _amount, 'cost': _amount * rate, 'type': ordertype, 'side': side, 'filled': 0, 'remaining': _amount, 'datetime': now.strftime('%Y-%m-%dT%H:%M:%S.%fZ'), 'timestamp': dt_ts(now), 'status': "open", 'fee': None, 'info': {}, 'leverage': leverage } if stop_loss: dry_order["info"] = {"stopPrice": dry_order["price"]} dry_order[self._ft_has['stop_price_prop']] = dry_order["price"] # Workaround to avoid filling stoploss orders immediately dry_order["ft_order_type"] = "stoploss" orderbook: Optional[OrderBook] = None if self.exchange_has('fetchL2OrderBook'): orderbook = self.fetch_l2_order_book(pair, 20) if ordertype == "limit" and orderbook: # Allow a 1% price difference allowed_diff = 0.01 if self._dry_is_price_crossed(pair, side, rate, orderbook, allowed_diff): logger.info( f"Converted order {pair} to market order due to price {rate} crossing spread " f"by more than {allowed_diff:.2%}.") dry_order["type"] = "market" if dry_order["type"] == "market" and not dry_order.get("ft_order_type"): # Update market order pricing average = self.get_dry_market_fill_price(pair, side, amount, rate, orderbook) dry_order.update({ 'average': average, 'filled': _amount, 'remaining': 0.0, 'status': "closed", 'cost': (dry_order['amount'] * average) }) # market orders will always incur taker fees dry_order = self.add_dry_order_fee(pair, dry_order, 'taker') dry_order = self.check_dry_limit_order_filled( dry_order, immediate=True, orderbook=orderbook) self._dry_run_open_orders[dry_order["id"]] = dry_order # Copy order and close it - so the returned order is open unless it's a market order return dry_order def add_dry_order_fee( self, pair: str, dry_order: Dict[str, Any], taker_or_maker: MakerTaker, ) -> Dict[str, Any]: fee = self.get_fee(pair, taker_or_maker=taker_or_maker) dry_order.update({ 'fee': { 'currency': self.get_pair_quote_currency(pair), 'cost': dry_order['cost'] * fee, 'rate': fee } }) return dry_order def get_dry_market_fill_price(self, pair: str, side: str, amount: float, rate: float, orderbook: Optional[OrderBook]) -> float: """ Get the market order fill price based on orderbook interpolation """ if self.exchange_has('fetchL2OrderBook'): if not orderbook: orderbook = self.fetch_l2_order_book(pair, 20) ob_type: OBLiteral = 'asks' if side == 'buy' else 'bids' slippage = 0.05 max_slippage_val = rate * ((1 + slippage) if side == 'buy' else (1 - slippage)) remaining_amount = amount filled_value = 0.0 book_entry_price = 0.0 for book_entry in orderbook[ob_type]: 
book_entry_price = book_entry[0] book_entry_coin_volume = book_entry[1] if remaining_amount > 0: if remaining_amount < book_entry_coin_volume: # Orderbook at this slot bigger than remaining amount filled_value += remaining_amount * book_entry_price break else: filled_value += book_entry_coin_volume * book_entry_price remaining_amount -= book_entry_coin_volume else: break else: # If remaining_amount wasn't consumed completely (break was not called) filled_value += remaining_amount * book_entry_price forecast_avg_filled_price = max(filled_value, 0) / amount # Limit max. slippage to specified value if side == 'buy': forecast_avg_filled_price = min(forecast_avg_filled_price, max_slippage_val) else: forecast_avg_filled_price = max(forecast_avg_filled_price, max_slippage_val) return self.price_to_precision(pair, forecast_avg_filled_price) return rate def _dry_is_price_crossed(self, pair: str, side: str, limit: float, orderbook: Optional[OrderBook] = None, offset: float = 0.0) -> bool: if not self.exchange_has('fetchL2OrderBook'): return True if not orderbook: orderbook = self.fetch_l2_order_book(pair, 1) try: if side == 'buy': price = orderbook['asks'][0][0] if limit * (1 - offset) >= price: return True else: price = orderbook['bids'][0][0] if limit * (1 + offset) <= price: return True except IndexError: # Ignore empty orderbooks when filling - can be filled with the next iteration. pass return False def check_dry_limit_order_filled( self, order: Dict[str, Any], immediate: bool = False, orderbook: Optional[OrderBook] = None) -> Dict[str, Any]: """ Check dry-run limit order fill and update fee (if it filled). """ if (order['status'] != "closed" and order['type'] in ["limit"] and not order.get('ft_order_type')): pair = order['symbol'] if self._dry_is_price_crossed(pair, order['side'], order['price'], orderbook): order.update({ 'status': 'closed', 'filled': order['amount'], 'remaining': 0, }) self.add_dry_order_fee( pair, order, 'taker' if immediate else 'maker', ) return order def fetch_dry_run_order(self, order_id) -> Dict[str, Any]: """ Return dry-run order Only call if running in dry-run mode. """ try: order = self._dry_run_open_orders[order_id] order = self.check_dry_limit_order_filled(order) return order except KeyError as e: order = Order.order_by_id(order_id) if order: ccxt_order = order.to_ccxt_object(self._ft_has['stop_price_prop']) self._dry_run_open_orders[order_id] = ccxt_order return ccxt_order # Gracefully handle errors with dry-run orders. raise InvalidOrderException( f'Tried to get an invalid dry-run-order (id: {order_id}). 
Message: {e}') from e # Order handling def _lev_prep(self, pair: str, leverage: float, side: BuySell, accept_fail: bool = False): if self.trading_mode != TradingMode.SPOT: self.set_margin_mode(pair, self.margin_mode, accept_fail) self._set_leverage(leverage, pair, accept_fail) def _get_params( self, side: BuySell, ordertype: str, leverage: float, reduceOnly: bool, time_in_force: str = 'GTC', ) -> Dict: params = self._params.copy() if time_in_force != 'GTC' and ordertype != 'market': params.update({'timeInForce': time_in_force.upper()}) if reduceOnly: params.update({'reduceOnly': True}) return params def _order_needs_price(self, ordertype: str) -> bool: return ( ordertype != 'market' or self._api.options.get("createMarketBuyOrderRequiresPrice", False) or self._ft_has.get('marketOrderRequiresPrice', False) ) def create_order( self, *, pair: str, ordertype: str, side: BuySell, amount: float, rate: float, leverage: float, reduceOnly: bool = False, time_in_force: str = 'GTC', ) -> Dict: if self._config['dry_run']: dry_order = self.create_dry_run_order( pair, ordertype, side, amount, self.price_to_precision(pair, rate), leverage) return dry_order params = self._get_params(side, ordertype, leverage, reduceOnly, time_in_force) try: # Set the precision for amount and price(rate) as accepted by the exchange amount = self.amount_to_precision(pair, self._amount_to_contracts(pair, amount)) needs_price = self._order_needs_price(ordertype) rate_for_order = self.price_to_precision(pair, rate) if needs_price else None if not reduceOnly: self._lev_prep(pair, leverage, side) order = self._api.create_order( pair, ordertype, side, amount, rate_for_order, params, ) if order.get('status') is None: # Map empty status to open. order['status'] = 'open' if order.get('type') is None: order['type'] = ordertype self._log_exchange_response('create_order', order) order = self._order_contracts_to_amount(order) return order except ccxt.InsufficientFunds as e: raise InsufficientFundsError( f'Insufficient funds to create {ordertype} {side} order on market {pair}. ' f'Tried to {side} amount {amount} at rate {rate}. ' f'Message: {e}') from e except ccxt.InvalidOrder as e: raise InvalidOrderException( f'Could not create {ordertype} {side} order on market {pair}. ' f'Tried to {side} amount {amount} at rate {rate}. ' f'Message: {e}') from e except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not place {side} order due to {e.__class__.__name__}. Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e def stoploss_adjust(self, stop_loss: float, order: Dict, side: str) -> bool: """ Verify stop_loss against stoploss-order value (limit or price) Returns True if adjustment is necessary. 
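Example (illustrative): for a long trade the stoploss is a sell order, so a new stop_loss of 105 against an existing stopLossPrice of 100 returns True, since the stop must trail upwards.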
""" if not self._ft_has.get('stoploss_on_exchange'): raise OperationalException(f"stoploss is not implemented for {self.name}.") price_param = self._ft_has['stop_price_prop'] return ( order.get(price_param, None) is None or ((side == "sell" and stop_loss > float(order[price_param])) or (side == "buy" and stop_loss < float(order[price_param]))) ) def _get_stop_order_type(self, user_order_type) -> Tuple[str, str]: available_order_Types: Dict[str, str] = self._ft_has["stoploss_order_types"] if user_order_type in available_order_Types.keys(): ordertype = available_order_Types[user_order_type] else: # Otherwise pick only one available ordertype = list(available_order_Types.values())[0] user_order_type = list(available_order_Types.keys())[0] return ordertype, user_order_type def _get_stop_limit_rate(self, stop_price: float, order_types: Dict, side: str) -> float: # Limit price threshold: As limit price should always be below stop-price limit_price_pct = order_types.get('stoploss_on_exchange_limit_ratio', 0.99) if side == "sell": limit_rate = stop_price * limit_price_pct else: limit_rate = stop_price * (2 - limit_price_pct) bad_stop_price = ((stop_price < limit_rate) if side == "sell" else (stop_price > limit_rate)) # Ensure rate is less than stop price if bad_stop_price: # This can for example happen if the stop / liquidation price is set to 0 # Which is possible if a market-order closes right away. # The InvalidOrderException will bubble up to exit_positions, where it will be # handled gracefully. raise InvalidOrderException( "In stoploss limit order, stop price should be more than limit price. " f"Stop price: {stop_price}, Limit price: {limit_rate}, " f"Limit Price pct: {limit_price_pct}" ) return limit_rate def _get_stop_params(self, side: BuySell, ordertype: str, stop_price: float) -> Dict: params = self._params.copy() # Verify if stopPrice works for your exchange, else configure stop_price_param params.update({self._ft_has['stop_price_param']: stop_price}) return params @retrier(retries=0) def create_stoploss(self, pair: str, amount: float, stop_price: float, order_types: Dict, side: BuySell, leverage: float) -> Dict: """ creates a stoploss order. requires `_ft_has['stoploss_order_types']` to be set as a dict mapping limit and market to the corresponding exchange type. The precise ordertype is determined by the order_types dict or exchange default. The exception below should never raise, since we disallow starting the bot in validate_ordertypes() This may work with a limited number of other exchanges, but correct working needs to be tested individually. WARNING: setting `stoploss_on_exchange` to True will NOT auto-enable stoploss on exchange. `stoploss_adjust` must still be implemented for this to work. 
""" if not self._ft_has['stoploss_on_exchange']: raise OperationalException(f"stoploss is not implemented for {self.name}.") user_order_type = order_types.get('stoploss', 'market') ordertype, user_order_type = self._get_stop_order_type(user_order_type) round_mode = ROUND_DOWN if side == 'buy' else ROUND_UP stop_price_norm = self.price_to_precision(pair, stop_price, rounding_mode=round_mode) limit_rate = None if user_order_type == 'limit': limit_rate = self._get_stop_limit_rate(stop_price, order_types, side) limit_rate = self.price_to_precision(pair, limit_rate, rounding_mode=round_mode) if self._config['dry_run']: dry_order = self.create_dry_run_order( pair, ordertype, side, amount, stop_price_norm, stop_loss=True, leverage=leverage, ) return dry_order try: params = self._get_stop_params(side=side, ordertype=ordertype, stop_price=stop_price_norm) if self.trading_mode == TradingMode.FUTURES: params['reduceOnly'] = True if 'stoploss_price_type' in order_types and 'stop_price_type_field' in self._ft_has: price_type = self._ft_has['stop_price_type_value_mapping'][ order_types.get('stoploss_price_type', PriceType.LAST)] params[self._ft_has['stop_price_type_field']] = price_type amount = self.amount_to_precision(pair, self._amount_to_contracts(pair, amount)) self._lev_prep(pair, leverage, side, accept_fail=True) order = self._api.create_order(symbol=pair, type=ordertype, side=side, amount=amount, price=limit_rate, params=params) self._log_exchange_response('create_stoploss_order', order) order = self._order_contracts_to_amount(order) logger.info(f"stoploss {user_order_type} order added for {pair}. " f"stop price: {stop_price}. limit: {limit_rate}") return order except ccxt.InsufficientFunds as e: raise InsufficientFundsError( f'Insufficient funds to create {ordertype} sell order on market {pair}. ' f'Tried to sell amount {amount} at rate {limit_rate}. ' f'Message: {e}') from e except ccxt.InvalidOrder as e: # Errors: # `Order would trigger immediately.` raise InvalidOrderException( f'Could not create {ordertype} sell order on market {pair}. ' f'Tried to sell amount {amount} at rate {limit_rate}. ' f'Message: {e}') from e except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f"Could not place stoploss order due to {e.__class__.__name__}. " f"Message: {e}") from e except ccxt.BaseError as e: raise OperationalException(e) from e @retrier(retries=API_FETCH_ORDER_RETRY_COUNT) def fetch_order(self, order_id: str, pair: str, params: Dict = {}) -> Dict: if self._config['dry_run']: return self.fetch_dry_run_order(order_id) try: order = self._api.fetch_order(order_id, pair, params=params) self._log_exchange_response('fetch_order', order) order = self._order_contracts_to_amount(order) return order except ccxt.OrderNotFound as e: raise RetryableOrderError( f'Order not found (pair: {pair} id: {order_id}). Message: {e}') from e except ccxt.InvalidOrder as e: raise InvalidOrderException( f'Tried to get an invalid order (pair: {pair} id: {order_id}). Message: {e}') from e except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not get order due to {e.__class__.__name__}. 
Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e def fetch_stoploss_order(self, order_id: str, pair: str, params: Dict = {}) -> Dict: return self.fetch_order(order_id, pair, params) def fetch_order_or_stoploss_order(self, order_id: str, pair: str, stoploss_order: bool = False) -> Dict: """ Simple wrapper calling either fetch_order or fetch_stoploss_order depending on the stoploss_order parameter :param order_id: OrderId to fetch order :param pair: Pair corresponding to order_id :param stoploss_order: If true, uses fetch_stoploss_order, otherwise fetch_order. """ if stoploss_order: return self.fetch_stoploss_order(order_id, pair) return self.fetch_order(order_id, pair) def check_order_canceled_empty(self, order: Dict) -> bool: """ Verify if an order has been cancelled without being partially filled :param order: Order dict as returned from fetch_order() :return: True if order has been cancelled without being filled, False otherwise. """ return (order.get('status') in NON_OPEN_EXCHANGE_STATES and order.get('filled') == 0.0) @retrier def cancel_order(self, order_id: str, pair: str, params: Dict = {}) -> Dict: if self._config['dry_run']: try: order = self.fetch_dry_run_order(order_id) order.update({'status': 'canceled', 'filled': 0.0, 'remaining': order['amount']}) return order except InvalidOrderException: return {} try: order = self._api.cancel_order(order_id, pair, params=params) self._log_exchange_response('cancel_order', order) order = self._order_contracts_to_amount(order) return order except ccxt.InvalidOrder as e: raise InvalidOrderException( f'Could not cancel order. Message: {e}') from e except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not cancel order due to {e.__class__.__name__}. Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e def cancel_stoploss_order(self, order_id: str, pair: str, params: Dict = {}) -> Dict: return self.cancel_order(order_id, pair, params) def is_cancel_order_result_suitable(self, corder) -> bool: if not isinstance(corder, dict): return False required = ('fee', 'status', 'amount') return all(corder.get(k, None) is not None for k in required) def cancel_order_with_result(self, order_id: str, pair: str, amount: float) -> Dict: """ Cancel order returning a result. Creates a fake result if cancel order returns a non-usable result and fetch_order does not work (certain exchanges don't return cancelled orders) :param order_id: Orderid to cancel :param pair: Pair corresponding to order_id :param amount: Amount to use for fake response :return: Result from either cancel_order if usable, or fetch_order """ try: corder = self.cancel_order(order_id, pair) if self.is_cancel_order_result_suitable(corder): return corder except InvalidOrderException: logger.warning(f"Could not cancel order {order_id} for {pair}.") try: order = self.fetch_order(order_id, pair) except InvalidOrderException: logger.warning(f"Could not fetch cancelled order {order_id}.") order = { 'id': order_id, 'status': 'canceled', 'amount': amount, 'filled': 0.0, 'fee': {}, 'info': {} } return order def cancel_stoploss_order_with_result(self, order_id: str, pair: str, amount: float) -> Dict: """ Cancel stoploss order returning a result. 
Creates a fake result if cancel order returns a non-usable result and fetch_order does not work (certain exchanges don't return cancelled orders) :param order_id: stoploss-order-id to cancel :param pair: Pair corresponding to order_id :param amount: Amount to use for fake response :return: Result from either cancel_order if usable, or fetch_order """ corder = self.cancel_stoploss_order(order_id, pair) if self.is_cancel_order_result_suitable(corder): return corder try: order = self.fetch_stoploss_order(order_id, pair) except InvalidOrderException: logger.warning(f"Could not fetch cancelled stoploss order {order_id}.") order = {'fee': {}, 'status': 'canceled', 'amount': amount, 'info': {}} return order @retrier def get_balances(self) -> dict: try: balances = self._api.fetch_balance() # Remove additional info from ccxt results balances.pop("info", None) balances.pop("free", None) balances.pop("total", None) balances.pop("used", None) return balances except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not get balance due to {e.__class__.__name__}. Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e @retrier def fetch_positions(self, pair: Optional[str] = None) -> List[Dict]: """ Fetch positions from the exchange. If no pair is given, all positions are returned. :param pair: Pair for the query """ if self._config['dry_run'] or self.trading_mode != TradingMode.FUTURES: return [] try: symbols = [] if pair: symbols.append(pair) positions: List[Dict] = self._api.fetch_positions(symbols) self._log_exchange_response('fetch_positions', positions) return positions except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not get positions due to {e.__class__.__name__}. Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e def _fetch_orders_emulate(self, pair: str, since_ms: int) -> List[Dict]: orders = [] if self.exchange_has('fetchClosedOrders'): orders = self._api.fetch_closed_orders(pair, since=since_ms) if self.exchange_has('fetchOpenOrders'): orders_open = self._api.fetch_open_orders(pair, since=since_ms) orders.extend(orders_open) return orders @retrier(retries=0) def fetch_orders(self, pair: str, since: datetime, params: Optional[Dict] = None) -> List[Dict]: """ Fetch all orders for a pair "since" :param pair: Pair for the query :param since: Starting time for the query """ if self._config['dry_run']: return [] try: since_ms = int((since.timestamp() - 10) * 1000) if self.exchange_has('fetchOrders'): if not params: params = {} try: orders: List[Dict] = self._api.fetch_orders(pair, since=since_ms, params=params) except ccxt.NotSupported: # Some exchanges don't support fetchOrders # attempt to fetch open and closed orders separately orders = self._fetch_orders_emulate(pair, since_ms) else: orders = self._fetch_orders_emulate(pair, since_ms) self._log_exchange_response('fetch_orders', orders) orders = [self._order_contracts_to_amount(o) for o in orders] return orders except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not fetch orders due to {e.__class__.__name__}. 
Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e @retrier def fetch_trading_fees(self) -> Dict[str, Any]: """ Fetch user account trading fees Can be cached, should not update often. """ if (self._config['dry_run'] or self.trading_mode != TradingMode.FUTURES or not self.exchange_has('fetchTradingFees')): return {} try: trading_fees: Dict[str, Any] = self._api.fetch_trading_fees() self._log_exchange_response('fetch_trading_fees', trading_fees) return trading_fees except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not fetch trading fees due to {e.__class__.__name__}. Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e @retrier def fetch_bids_asks(self, symbols: Optional[List[str]] = None, cached: bool = False) -> Dict: """ :param cached: Allow cached result :return: fetch_tickers result """ if not self.exchange_has('fetchBidsAsks'): return {} if cached: with self._cache_lock: tickers = self._fetch_tickers_cache.get('fetch_bids_asks') if tickers: return tickers try: tickers = self._api.fetch_bids_asks(symbols) with self._cache_lock: self._fetch_tickers_cache['fetch_bids_asks'] = tickers return tickers except ccxt.NotSupported as e: raise OperationalException( f'Exchange {self._api.name} does not support fetching bids/asks in batch. ' f'Message: {e}') from e except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not load bids/asks due to {e.__class__.__name__}. Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e @retrier def get_tickers(self, symbols: Optional[List[str]] = None, cached: bool = False) -> Tickers: """ :param cached: Allow cached result :return: fetch_tickers result """ tickers: Tickers if not self.exchange_has('fetchTickers'): return {} if cached: with self._cache_lock: tickers = self._fetch_tickers_cache.get('fetch_tickers') # type: ignore if tickers: return tickers try: tickers = self._api.fetch_tickers(symbols) with self._cache_lock: self._fetch_tickers_cache['fetch_tickers'] = tickers return tickers except ccxt.NotSupported as e: raise OperationalException( f'Exchange {self._api.name} does not support fetching tickers in batch. ' f'Message: {e}') from e except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not load tickers due to {e.__class__.__name__}. Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e # Pricing info @retrier def fetch_ticker(self, pair: str) -> Ticker: try: if (pair not in self.markets or self.markets[pair].get('active', False) is False): raise ExchangeError(f"Pair {pair} not available") data: Ticker = self._api.fetch_ticker(pair) return data except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not load ticker due to {e.__class__.__name__}. Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e @staticmethod def get_next_limit_in_list(limit: int, limit_range: Optional[List[int]], range_required: bool = True): """ Get next greater value in the list. 
Used by fetch_l2_order_book if the api only supports a limited range """ if not limit_range: return limit result = min([x for x in limit_range if limit <= x] + [max(limit_range)]) if not range_required and limit > result: # Range is not required - we can use None as parameter. return None return result @retrier def fetch_l2_order_book(self, pair: str, limit: int = 100) -> OrderBook: """ Get L2 order book from exchange. Can be limited to a certain amount (if supported). Returns a dict in the format {'asks': [price, volume], 'bids': [price, volume]} """ limit1 = self.get_next_limit_in_list(limit, self._ft_has['l2_limit_range'], self._ft_has['l2_limit_range_required']) try: return self._api.fetch_l2_order_book(pair, limit1) except ccxt.NotSupported as e: raise OperationalException( f'Exchange {self._api.name} does not support fetching order book.' f'Message: {e}') from e except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not get order book due to {e.__class__.__name__}. Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e def _get_price_side(self, side: str, is_short: bool, conf_strategy: Dict) -> BidAsk: price_side = conf_strategy['price_side'] if price_side in ('same', 'other'): price_map = { ('entry', 'long', 'same'): 'bid', ('entry', 'long', 'other'): 'ask', ('entry', 'short', 'same'): 'ask', ('entry', 'short', 'other'): 'bid', ('exit', 'long', 'same'): 'ask', ('exit', 'long', 'other'): 'bid', ('exit', 'short', 'same'): 'bid', ('exit', 'short', 'other'): 'ask', } price_side = price_map[(side, 'short' if is_short else 'long', price_side)] return price_side def get_rate(self, pair: str, refresh: bool, side: EntryExit, is_short: bool, order_book: Optional[OrderBook] = None, ticker: Optional[Ticker] = None) -> float: """ Calculates bid/ask target bid rate - between current ask price and last price ask rate - either using ticker bid or first bid based on orderbook or remain static in any other case since it's not updating. :param pair: Pair to get rate for :param refresh: allow cached data :param side: "buy" or "sell" :return: float: Price :raises PricingError if orderbook price could not be determined. """ name = side.capitalize() strat_name = 'entry_pricing' if side == "entry" else 'exit_pricing' cache_rate: TTLCache = self._entry_rate_cache if side == "entry" else self._exit_rate_cache if not refresh: with self._cache_lock: rate = cache_rate.get(pair) # Check if cache has been invalidated if rate: logger.debug(f"Using cached {side} rate for {pair}.") return rate conf_strategy = self._config.get(strat_name, {}) price_side = self._get_price_side(side, is_short, conf_strategy) if conf_strategy.get('use_order_book', False): order_book_top = conf_strategy.get('order_book_top', 1) if order_book is None: order_book = self.fetch_l2_order_book(pair, order_book_top) rate = self._get_rate_from_ob(pair, side, order_book, name, price_side, order_book_top) else: logger.debug(f"Using Last {price_side.capitalize()} / Last Price") if ticker is None: ticker = self.fetch_ticker(pair) rate = self._get_rate_from_ticker(side, ticker, conf_strategy, price_side) if rate is None: raise PricingError(f"{name}-Rate for {pair} was empty.") with self._cache_lock: cache_rate[pair] = rate return rate def _get_rate_from_ticker(self, side: EntryExit, ticker: Ticker, conf_strategy: Dict[str, Any], price_side: BidAsk) -> Optional[float]: """ Get rate from ticker. 
""" ticker_rate = ticker[price_side] if ticker['last'] and ticker_rate: if side == 'entry' and ticker_rate > ticker['last']: balance = conf_strategy.get('price_last_balance', 0.0) ticker_rate = ticker_rate + balance * (ticker['last'] - ticker_rate) elif side == 'exit' and ticker_rate < ticker['last']: balance = conf_strategy.get('price_last_balance', 0.0) ticker_rate = ticker_rate - balance * (ticker_rate - ticker['last']) rate = ticker_rate return rate def _get_rate_from_ob(self, pair: str, side: EntryExit, order_book: OrderBook, name: str, price_side: BidAsk, order_book_top: int) -> float: """ Get rate from orderbook :raises: PricingError if rate could not be determined. """ logger.debug('order_book %s', order_book) # top 1 = index 0 try: obside: OBLiteral = 'bids' if price_side == 'bid' else 'asks' rate = order_book[obside][order_book_top - 1][0] except (IndexError, KeyError) as e: logger.warning( f"{pair} - {name} Price at location {order_book_top} from orderbook " f"could not be determined. Orderbook: {order_book}" ) raise PricingError from e logger.debug(f"{pair} - {name} price from orderbook {price_side.capitalize()}" f"side - top {order_book_top} order book {side} rate {rate:.8f}") return rate def get_rates(self, pair: str, refresh: bool, is_short: bool) -> Tuple[float, float]: entry_rate = None exit_rate = None if not refresh: with self._cache_lock: entry_rate = self._entry_rate_cache.get(pair) exit_rate = self._exit_rate_cache.get(pair) if entry_rate: logger.debug(f"Using cached buy rate for {pair}.") if exit_rate: logger.debug(f"Using cached sell rate for {pair}.") entry_pricing = self._config.get('entry_pricing', {}) exit_pricing = self._config.get('exit_pricing', {}) order_book = ticker = None if not entry_rate and entry_pricing.get('use_order_book', False): order_book_top = max(entry_pricing.get('order_book_top', 1), exit_pricing.get('order_book_top', 1)) order_book = self.fetch_l2_order_book(pair, order_book_top) entry_rate = self.get_rate(pair, refresh, 'entry', is_short, order_book=order_book) elif not entry_rate: ticker = self.fetch_ticker(pair) entry_rate = self.get_rate(pair, refresh, 'entry', is_short, ticker=ticker) if not exit_rate: exit_rate = self.get_rate(pair, refresh, 'exit', is_short, order_book=order_book, ticker=ticker) return entry_rate, exit_rate # Fee handling @retrier def get_trades_for_order(self, order_id: str, pair: str, since: datetime, params: Optional[Dict] = None) -> List: """ Fetch Orders using the "fetch_my_trades" endpoint and filter them by order-id. The "since" argument passed in is coming from the database and is in UTC, as timezone-native datetime object. From the python documentation: > Naive datetime instances are assumed to represent local time Therefore, calling "since.timestamp()" will get the UTC timestamp, after applying the transformation from local timezone to UTC. This works for timezones UTC+ since then the result will contain trades from a few hours instead of from the last 5 seconds, however fails for UTC- timezones, since we're then asking for trades with a "since" argument in the future. :param order_id order_id: Order-id as given when creating the order :param pair: Pair the order is for :param since: datetime object of the order creation time. Assumes object is in UTC. 
""" if self._config['dry_run']: return [] if not self.exchange_has('fetchMyTrades'): return [] try: # Allow 5s offset to catch slight time offsets (discovered in #1185) # since needs to be int in milliseconds _params = params if params else {} my_trades = self._api.fetch_my_trades( pair, int((since.replace(tzinfo=timezone.utc).timestamp() - 5) * 1000), params=_params) matched_trades = [trade for trade in my_trades if trade['order'] == order_id] self._log_exchange_response('get_trades_for_order', matched_trades) matched_trades = self._trades_contracts_to_amount(matched_trades) return matched_trades except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not get trades due to {e.__class__.__name__}. Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e def get_order_id_conditional(self, order: Dict[str, Any]) -> str: return order['id'] @retrier def get_fee(self, symbol: str, type: str = '', side: str = '', amount: float = 1, price: float = 1, taker_or_maker: MakerTaker = 'maker') -> float: """ Retrieve fee from exchange :param symbol: Pair :param type: Type of order (market, limit, ...) :param side: Side of order (buy, sell) :param amount: Amount of order :param price: Price of order :param taker_or_maker: 'maker' or 'taker' (ignored if "type" is provided) """ if type and type == 'market': taker_or_maker = 'taker' try: if self._config['dry_run'] and self._config.get('fee', None) is not None: return self._config['fee'] # validate that markets are loaded before trying to get fee if self._api.markets is None or len(self._api.markets) == 0: self._api.load_markets(params={}) return self._api.calculate_fee(symbol=symbol, type=type, side=side, amount=amount, price=price, takerOrMaker=taker_or_maker)['rate'] except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not get fee info due to {e.__class__.__name__}. Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e @staticmethod def order_has_fee(order: Dict) -> bool: """ Verifies if the passed in order dict has the needed keys to extract fees, and that these keys (currency, cost) are not empty. :param order: Order or trade (one trade) dict :return: True if the fee substructure contains currency and cost, false otherwise """ if not isinstance(order, dict): return False return ('fee' in order and order['fee'] is not None and (order['fee'].keys() >= {'currency', 'cost'}) and order['fee']['currency'] is not None and order['fee']['cost'] is not None ) def calculate_fee_rate( self, fee: Dict, symbol: str, cost: float, amount: float) -> Optional[float]: """ Calculate fee rate if it's not given by the exchange. 
:param fee: ccxt Fee dict - must contain cost / currency / rate :param symbol: Symbol of the order :param cost: Total cost of the order :param amount: Amount of the order """ if fee.get('rate') is not None: return fee.get('rate') fee_curr = fee.get('currency') if fee_curr is None: return None fee_cost = float(fee['cost']) # Calculate fee based on order details if fee_curr == self.get_pair_base_currency(symbol): # Base currency - divide by amount return round(fee_cost / amount, 8) elif fee_curr == self.get_pair_quote_currency(symbol): # Quote currency - divide by cost return round(fee_cost / cost, 8) if cost else None else: # If Fee currency is a different currency if not cost: # If cost is None or 0.0 -> falsy, return None return None try: comb = self.get_valid_pair_combination(fee_curr, self._config['stake_currency']) tick = self.fetch_ticker(comb) fee_to_quote_rate = safe_value_fallback2(tick, tick, 'last', 'ask') except (ValueError, ExchangeError): fee_to_quote_rate = self._config['exchange'].get('unknown_fee_rate', None) if not fee_to_quote_rate: return None return round((fee_cost * fee_to_quote_rate) / cost, 8) def extract_cost_curr_rate(self, fee: Dict, symbol: str, cost: float, amount: float) -> Tuple[float, str, Optional[float]]: """ Extract tuple of cost, currency, rate. Requires order_has_fee to run first! :param fee: ccxt Fee dict - must contain cost / currency / rate :param symbol: Symbol of the order :param cost: Total cost of the order :param amount: Amount of the order :return: Tuple with cost, currency, rate of the given fee dict """ return (float(fee['cost']), fee['currency'], self.calculate_fee_rate( fee, symbol, cost, amount ) ) # Historic data def get_historic_ohlcv(self, pair: str, timeframe: str, since_ms: int, candle_type: CandleType, is_new_pair: bool = False, until_ms: Optional[int] = None) -> List: """ Get candle history using asyncio and returns the list of candles. Handles all async work for this. Async over one pair, assuming we get `self.ohlcv_candle_limit()` candles per call. :param pair: Pair to download :param timeframe: Timeframe to get data for :param since_ms: Timestamp in milliseconds to get history from :param until_ms: Timestamp in milliseconds to get history up to :param candle_type: '', mark, index, premiumIndex, or funding_rate :return: List with candle (OHLCV) data """ pair, _, _, data, _ = self.loop.run_until_complete( self._async_get_historic_ohlcv(pair=pair, timeframe=timeframe, since_ms=since_ms, until_ms=until_ms, is_new_pair=is_new_pair, candle_type=candle_type)) logger.info(f"Downloaded data for {pair} with length {len(data)}.") return data async def _async_get_historic_ohlcv(self, pair: str, timeframe: str, since_ms: int, candle_type: CandleType, is_new_pair: bool = False, raise_: bool = False, until_ms: Optional[int] = None ) -> OHLCVResponse: """ Download historic ohlcv :param is_new_pair: used by binance subclass to allow "fast" new pair downloading :param candle_type: Any of the enum CandleType (must match trading mode!) 
""" one_call = timeframe_to_msecs(timeframe) * self.ohlcv_candle_limit( timeframe, candle_type, since_ms) logger.debug( "one_call: %s msecs (%s)", one_call, dt_humanize(dt_now() - timedelta(milliseconds=one_call), only_distance=True) ) input_coroutines = [self._async_get_candle_history( pair, timeframe, candle_type, since) for since in range(since_ms, until_ms or dt_ts(), one_call)] data: List = [] # Chunk requests into batches of 100 to avoid overwelming ccxt Throttling for input_coro in chunks(input_coroutines, 100): results = await asyncio.gather(*input_coro, return_exceptions=True) for res in results: if isinstance(res, Exception): logger.warning(f"Async code raised an exception: {repr(res)}") if raise_: raise continue else: # Deconstruct tuple if it's not an exception p, _, c, new_data, _ = res if p == pair and c == candle_type: data.extend(new_data) # Sort data again after extending the result - above calls return in "async order" data = sorted(data, key=lambda x: x[0]) return pair, timeframe, candle_type, data, self._ohlcv_partial_candle def _build_coroutine( self, pair: str, timeframe: str, candle_type: CandleType, since_ms: Optional[int], cache: bool) -> Coroutine[Any, Any, OHLCVResponse]: not_all_data = cache and self.required_candle_call_count > 1 if cache and (pair, timeframe, candle_type) in self._klines: candle_limit = self.ohlcv_candle_limit(timeframe, candle_type) min_date = date_minus_candles(timeframe, candle_limit - 5).timestamp() # Check if 1 call can get us updated candles without hole in the data. if min_date < self._pairs_last_refresh_time.get((pair, timeframe, candle_type), 0): # Cache can be used - do one-off call. not_all_data = False else: # Time jump detected, evict cache logger.info( f"Time jump detected. Evicting cache for {pair}, {timeframe}, {candle_type}") del self._klines[(pair, timeframe, candle_type)] if (not since_ms and (self._ft_has["ohlcv_require_since"] or not_all_data)): # Multiple calls for one pair - to get more history one_call = timeframe_to_msecs(timeframe) * self.ohlcv_candle_limit( timeframe, candle_type, since_ms) move_to = one_call * self.required_candle_call_count now = timeframe_to_next_date(timeframe) since_ms = int((now - timedelta(seconds=move_to // 1000)).timestamp() * 1000) if since_ms: return self._async_get_historic_ohlcv( pair, timeframe, since_ms=since_ms, raise_=True, candle_type=candle_type) else: # One call ... "regular" refresh return self._async_get_candle_history( pair, timeframe, since_ms=since_ms, candle_type=candle_type) def _build_ohlcv_dl_jobs( self, pair_list: ListPairsWithTimeframes, since_ms: Optional[int], cache: bool) -> Tuple[List[Coroutine], List[Tuple[str, str, CandleType]]]: """ Build Coroutines to execute as part of refresh_latest_ohlcv """ input_coroutines: List[Coroutine[Any, Any, OHLCVResponse]] = [] cached_pairs = [] for pair, timeframe, candle_type in set(pair_list): if (timeframe not in self.timeframes and candle_type in (CandleType.SPOT, CandleType.FUTURES)): logger.warning( f"Cannot download ({pair}, {timeframe}) combination as this timeframe is " f"not available on {self.name}. Available timeframes are " f"{', '.join(self.timeframes)}.") continue if ((pair, timeframe, candle_type) not in self._klines or not cache or self._now_is_time_to_refresh(pair, timeframe, candle_type)): input_coroutines.append( self._build_coroutine(pair, timeframe, candle_type, since_ms, cache)) else: logger.debug( f"Using cached candle (OHLCV) data for {pair}, {timeframe}, {candle_type} ..." 
) cached_pairs.append((pair, timeframe, candle_type)) return input_coroutines, cached_pairs def _process_ohlcv_df(self, pair: str, timeframe: str, c_type: CandleType, ticks: List[List], cache: bool, drop_incomplete: bool) -> DataFrame: # keeping last candle time as last refreshed time of the pair if ticks and cache: idx = -2 if drop_incomplete and len(ticks) > 1 else -1 self._pairs_last_refresh_time[(pair, timeframe, c_type)] = ticks[idx][0] // 1000 # keeping parsed dataframe in cache ohlcv_df = ohlcv_to_dataframe(ticks, timeframe, pair=pair, fill_missing=True, drop_incomplete=drop_incomplete) if cache: if (pair, timeframe, c_type) in self._klines: old = self._klines[(pair, timeframe, c_type)] # Reassign so we return the updated, combined df ohlcv_df = clean_ohlcv_dataframe(concat([old, ohlcv_df], axis=0), timeframe, pair, fill_missing=True, drop_incomplete=False) candle_limit = self.ohlcv_candle_limit(timeframe, self._config['candle_type_def']) # Age out old candles ohlcv_df = ohlcv_df.tail(candle_limit + self._startup_candle_count) ohlcv_df = ohlcv_df.reset_index(drop=True) self._klines[(pair, timeframe, c_type)] = ohlcv_df else: self._klines[(pair, timeframe, c_type)] = ohlcv_df return ohlcv_df def refresh_latest_ohlcv(self, pair_list: ListPairsWithTimeframes, *, since_ms: Optional[int] = None, cache: bool = True, drop_incomplete: Optional[bool] = None ) -> Dict[PairWithTimeframe, DataFrame]: """ Refresh in-memory OHLCV asynchronously and set `_klines` with the result Loops asynchronously over pair_list and downloads all pairs async (semi-parallel). Only used in the dataprovider.refresh() method. :param pair_list: List of (pair, timeframe, candle_type) tuples to refresh :param since_ms: time since when to download, in milliseconds :param cache: Assign result to _klines. Useful for one-off downloads like for pairlists :param drop_incomplete: Control candle dropping. Specifying None defaults to _ohlcv_partial_candle :return: Dict of [{(pair, timeframe, candle_type): Dataframe}] """ logger.debug("Refreshing candle (OHLCV) data for %d pairs", len(pair_list)) # Gather coroutines to run input_coroutines, cached_pairs = self._build_ohlcv_dl_jobs(pair_list, since_ms, cache) results_df = {} # Chunk requests into batches of 100 to avoid overwhelming ccxt Throttling for input_coro in chunks(input_coroutines, 100): async def gather_stuff(): return await asyncio.gather(*input_coro, return_exceptions=True) with self._loop_lock: results = self.loop.run_until_complete(gather_stuff()) for res in results: if isinstance(res, Exception): logger.warning(f"Async code raised an exception: {repr(res)}") continue # Deconstruct tuple (has 5 elements) pair, timeframe, c_type, ticks, drop_hint = res drop_incomplete_ = drop_hint if drop_incomplete is None else drop_incomplete ohlcv_df = self._process_ohlcv_df( pair, timeframe, c_type, ticks, cache, drop_incomplete_) results_df[(pair, timeframe, c_type)] = ohlcv_df # Return cached klines for pair, timeframe, c_type in cached_pairs: results_df[(pair, timeframe, c_type)] = self.klines( (pair, timeframe, c_type), copy=False ) return results_df def _now_is_time_to_refresh(self, pair: str, timeframe: str, candle_type: CandleType) -> bool: # Timeframe in seconds
interval_in_sec = timeframe_to_seconds(timeframe)
21
2023-10-21 10:02:05+00:00
24k
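The freqtrade exchange record above contains one piece that is easy to study in isolation: `get_next_limit_in_list`, which snaps a requested order-book depth to the next value the exchange actually supports. Below is a minimal standalone sketch of that logic — the function body is restated from the snippet itself, while the example limit list `[5, 10, 20, 50, 100]` is an illustrative assumption, not taken from any real exchange:

```python
from typing import List, Optional


def get_next_limit_in_list(limit: int, limit_range: Optional[List[int]],
                           range_required: bool = True) -> Optional[int]:
    """Return the smallest supported value >= limit, as in the snippet above."""
    if not limit_range:
        return limit  # no restriction -> pass the requested limit through
    # smallest supported depth that still covers the request,
    # falling back to the largest supported depth if none does
    result = min([x for x in limit_range if limit <= x] + [max(limit_range)])
    if not range_required and limit > result:
        # the API also accepts "no limit" -> ask for the full book instead
        return None
    return result


print(get_next_limit_in_list(42, [5, 10, 20, 50, 100]))    # 50
print(get_next_limit_in_list(500, [5, 10, 20, 50, 100]))   # 100 (capped)
print(get_next_limit_in_list(500, [5, 10, 20, 50, 100],
                             range_required=False))        # None
```

Returning `None` instead of the capped maximum matters for exchanges whose order-book endpoint treats a missing limit as "send everything", which is why `fetch_l2_order_book` in the record threads `l2_limit_range_required` through from `_ft_has`.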
yanzhh/HGERE
transformers/src/transformers/modeling_initdistilbert.py
[ { "identifier": "gelu", "path": "transformers/src/transformers/activations.py", "snippet": "def swish(x):\ndef _gelu_python(x):\ndef gelu_new(x):\ndef get_activation(activation_string):\nACT2FN = {\n \"relu\": F.relu,\n \"swish\": swish,\n \"gelu\": gelu,\n \"tanh\": F.tanh,\n \"gelu_new\...
import copy import logging import math import numpy as np import torch import torch.nn as nn from torch.nn import CrossEntropyLoss from .activations import gelu from .configuration_distilbert import DistilBertConfig from .file_utils import add_start_docstrings, add_start_docstrings_to_callable from .modeling_utils import PreTrainedModel, prune_linear_layer from .modeling_distilbert import DistilBertModel
18,023
if (i in [1, mid]): h1 = gelu(self.linear_1[w](hidden_states)) h2 = self.linear_2[w](h1).squeeze(-1) if question_ends_mask is None: prob = torch.sigmoid(h2) # temp_loss = MSE(prob, rank_prob[:,w,:]) * seq_mask temp_loss = torch.norm((prob - rank_prob[:,w,:]) * seq_mask, p=1) loss += temp_loss / torch.sum(seq_mask) else: h2 = h2.view(-1, 4, num_items) h2 = torch.mean(h2, dim=1) prob = torch.sigmoid(h2) temp_loss = torch.norm((prob - rank_prob[:,w,:]) * question_ends_mask, p=1) loss += temp_loss / torch.sum(question_ends_mask) layer_outputs = layer_module( hidden_states, attn_mask, head_mask[i], ) hidden_states = layer_outputs[0] outputs = (hidden_states, loss) return outputs # INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL # class DistilBertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = DistilBertConfig pretrained_model_archive_map = DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP load_tf_weights = None base_model_prefix = "distilbert" def _init_weights(self, module): """ Initialize the weights. """ if isinstance(module, nn.Embedding): if module.weight.requires_grad: module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() DISTILBERT_START_DOCSTRING = r""" This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~transformers.DistilBertConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ DISTILBERT_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`transformers.DistilBertTokenizer`. See :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.encode_plus` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. `What are attention masks? <../glossary.html#attention-mask>`__ head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: :obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**. inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. """ @add_start_docstrings( "The bare DistilBERT encoder/transformer outputting raw hidden-states without any specific head on top.", DISTILBERT_START_DOCSTRING, ) class InitDistilBertModel(DistilBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.embeddings = Embeddings(config) # Embeddings self.transformer = Transformer(config) # Encoder self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, new_embeddings): self.embeddings.word_embeddings = new_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.transformer.layer[layer].attention.prune_heads(heads)
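The cropped code in this record supervises intermediate transformer layers with small scoring heads: a `hidden_size -> 32 -> 1` bottleneck per probed layer, a sigmoid, and an L1 loss against target probabilities masked to real tokens. Here is a minimal self-contained sketch of one such head; the batch/sequence sizes and `hidden_size=768` are illustrative assumptions, `F.gelu` stands in for the repo's own `gelu` activation, and the real model keeps one head pair per probed layer in `nn.ModuleList`s:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class RankProbHead(nn.Module):
    """One intermediate-layer probe: hidden states -> bottleneck -> per-token prob."""

    def __init__(self, hidden_size: int, bottleneck: int = 32):
        super().__init__()
        self.linear_1 = nn.Linear(hidden_size, bottleneck)
        self.linear_2 = nn.Linear(bottleneck, 1)

    def forward(self, hidden_states, rank_prob, seq_mask):
        h1 = F.gelu(self.linear_1(hidden_states))   # (bs, seq_len, bottleneck)
        h2 = self.linear_2(h1).squeeze(-1)          # (bs, seq_len)
        prob = torch.sigmoid(h2)
        # masked L1 distance to the targets, normalized by the number of real tokens
        loss = torch.norm((prob - rank_prob) * seq_mask, p=1) / seq_mask.sum()
        return prob, loss


head = RankProbHead(hidden_size=768)
hidden = torch.randn(2, 16, 768)
targets = torch.rand(2, 16)      # stand-in for rank_prob[:, w, :]
mask = torch.ones(2, 16)
prob, loss = head(hidden, targets, mask)
print(prob.shape, float(loss))   # torch.Size([2, 16]) and a scalar loss
```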
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch DistilBERT model adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM) and in part from HuggingFace PyTorch version of Google AI Bert model (https://github.com/google-research/bert) """ logger = logging.getLogger(__name__) DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP = { "distilbert-base-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-uncased-pytorch_model.bin", "distilbert-base-uncased-distilled-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-uncased-distilled-squad-pytorch_model.bin", "distilbert-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-cased-pytorch_model.bin", "distilbert-base-cased-distilled-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-cased-distilled-squad-pytorch_model.bin", "distilbert-base-german-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-german-cased-pytorch_model.bin", "distilbert-base-multilingual-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-multilingual-cased-pytorch_model.bin", "distilbert-base-uncased-finetuned-sst-2-english": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-uncased-finetuned-sst-2-english-pytorch_model.bin", } # UTILS AND BUILDING BLOCKS OF THE ARCHITECTURE # def create_sinusoidal_embeddings(n_pos, dim, out): position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]) out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2])) out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2])) out.detach_() out.requires_grad = False class Embeddings(nn.Module): def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.dim, padding_idx=0) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.dim) if config.sinusoidal_pos_embds: create_sinusoidal_embeddings( n_pos=config.max_position_embeddings, dim=config.dim, out=self.position_embeddings.weight ) self.LayerNorm = nn.LayerNorm(config.dim, eps=1e-12) self.dropout = nn.Dropout(config.dropout) def forward(self, input_ids): """ Parameters ---------- input_ids: torch.tensor(bs, max_seq_length) The token ids to embed. 
Outputs ------- embeddings: torch.tensor(bs, max_seq_length, dim) The embedded tokens (plus position embeddings, no token_type embeddings) """ seq_length = input_ids.size(1) position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device) # (max_seq_length) position_ids = position_ids.unsqueeze(0).expand_as(input_ids) # (bs, max_seq_length) word_embeddings = self.word_embeddings(input_ids) # (bs, max_seq_length, dim) position_embeddings = self.position_embeddings(position_ids) # (bs, max_seq_length, dim) embeddings = word_embeddings + position_embeddings # (bs, max_seq_length, dim) embeddings = self.LayerNorm(embeddings) # (bs, max_seq_length, dim) embeddings = self.dropout(embeddings) # (bs, max_seq_length, dim) return embeddings class MultiHeadSelfAttention(nn.Module): def __init__(self, config): super().__init__() self.n_heads = config.n_heads self.dim = config.dim self.dropout = nn.Dropout(p=config.attention_dropout) self.output_attentions = config.output_attentions assert self.dim % self.n_heads == 0 self.q_lin = nn.Linear(in_features=config.dim, out_features=config.dim) self.k_lin = nn.Linear(in_features=config.dim, out_features=config.dim) self.v_lin = nn.Linear(in_features=config.dim, out_features=config.dim) self.out_lin = nn.Linear(in_features=config.dim, out_features=config.dim) self.pruned_heads = set() def prune_heads(self, heads): attention_head_size = self.dim // self.n_heads if len(heads) == 0: return mask = torch.ones(self.n_heads, attention_head_size) heads = set(heads) - self.pruned_heads for head in heads: head -= sum(1 if h < head else 0 for h in self.pruned_heads) mask[head] = 0 mask = mask.view(-1).contiguous().eq(1) index = torch.arange(len(mask))[mask].long() # Prune linear layers self.q_lin = prune_linear_layer(self.q_lin, index) self.k_lin = prune_linear_layer(self.k_lin, index) self.v_lin = prune_linear_layer(self.v_lin, index) self.out_lin = prune_linear_layer(self.out_lin, index, dim=1) # Update hyper params self.n_heads = self.n_heads - len(heads) self.dim = attention_head_size * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, query, key, value, mask, head_mask=None): """ Parameters ---------- query: torch.tensor(bs, seq_length, dim) key: torch.tensor(bs, seq_length, dim) value: torch.tensor(bs, seq_length, dim) mask: torch.tensor(bs, seq_length) Outputs ------- weights: torch.tensor(bs, n_heads, seq_length, seq_length) Attention weights context: torch.tensor(bs, seq_length, dim) Contextualized layer. 
Optional: only if `output_attentions=True` """ bs, q_length, dim = query.size() k_length = key.size(1) # assert dim == self.dim, 'Dimensions do not match: %s input vs %s configured' % (dim, self.dim) # assert key.size() == value.size() dim_per_head = self.dim // self.n_heads mask_reshp = (bs, 1, 1, k_length) def shape(x): """ separate heads """ return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2) def unshape(x): """ group heads """ return x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head) q = shape(self.q_lin(query)) # (bs, n_heads, q_length, dim_per_head) k = shape(self.k_lin(key)) # (bs, n_heads, k_length, dim_per_head) v = shape(self.v_lin(value)) # (bs, n_heads, k_length, dim_per_head) q = q / math.sqrt(dim_per_head) # (bs, n_heads, q_length, dim_per_head) scores = torch.matmul(q, k.transpose(2, 3)) # (bs, n_heads, q_length, k_length) mask = (mask == 0).view(mask_reshp).expand_as(scores) # (bs, n_heads, q_length, k_length) scores.masked_fill_(mask, -float("inf")) # (bs, n_heads, q_length, k_length) weights = nn.Softmax(dim=-1)(scores) # (bs, n_heads, q_length, k_length) weights = self.dropout(weights) # (bs, n_heads, q_length, k_length) # Mask heads if we want to if head_mask is not None: weights = weights * head_mask context = torch.matmul(weights, v) # (bs, n_heads, q_length, dim_per_head) context = unshape(context) # (bs, q_length, dim) context = self.out_lin(context) # (bs, q_length, dim) if self.output_attentions: return (context, weights) else: return (context,) class FFN(nn.Module): def __init__(self, config): super().__init__() self.dropout = nn.Dropout(p=config.dropout) self.lin1 = nn.Linear(in_features=config.dim, out_features=config.hidden_dim) self.lin2 = nn.Linear(in_features=config.hidden_dim, out_features=config.dim) assert config.activation in ["relu", "gelu"], "activation ({}) must be in ['relu', 'gelu']".format( config.activation ) self.activation = gelu if config.activation == "gelu" else nn.ReLU() def forward(self, input): x = self.lin1(input) x = self.activation(x) x = self.lin2(x) x = self.dropout(x) return x class TransformerBlock(nn.Module): def __init__(self, config): super().__init__() self.output_attentions = config.output_attentions assert config.dim % config.n_heads == 0 self.attention = MultiHeadSelfAttention(config) self.sa_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12) self.ffn = FFN(config) self.output_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12) def forward(self, x, attn_mask=None, head_mask=None): """ Parameters ---------- x: torch.tensor(bs, seq_length, dim) attn_mask: torch.tensor(bs, seq_length) Outputs ------- sa_weights: torch.tensor(bs, n_heads, seq_length, seq_length) The attention weights ffn_output: torch.tensor(bs, seq_length, dim) The output of the transformer block contextualization. 
""" # Self-Attention sa_output = self.attention(query=x, key=x, value=x, mask=attn_mask, head_mask=head_mask) if self.output_attentions: sa_output, sa_weights = sa_output # (bs, seq_length, dim), (bs, n_heads, seq_length, seq_length) else: # To handle these `output_attention` or `output_hidden_states` cases returning tuples assert type(sa_output) == tuple sa_output = sa_output[0] sa_output = self.sa_layer_norm(sa_output + x) # (bs, seq_length, dim) # Feed Forward Network ffn_output = self.ffn(sa_output) # (bs, seq_length, dim) ffn_output = self.output_layer_norm(ffn_output + sa_output) # (bs, seq_length, dim) output = (ffn_output,) if self.output_attentions: output = (sa_weights,) + output return output class Transformer(nn.Module): def __init__(self, config): super().__init__() self.n_layers = config.n_layers self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states layer = TransformerBlock(config) self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.n_layers)]) self.linear_size = 32 self.linear_1 = nn.ModuleList([nn.Linear(config.hidden_size, self.linear_size), nn.Linear(config.hidden_size, self.linear_size), # nn.Linear(config.hidden_size, self.linear_size), # nn.Linear(config.hidden_size, self.linear_size), # nn.Linear(config.hidden_size, self.linear_size), # nn.Linear(config.hidden_size, self.linear_size), # nn.Linear(config.hidden_size, self.linear_size), # nn.Linear(config.hidden_size, self.linear_size) ]) self.linear_2 = nn.ModuleList([nn.Linear(self.linear_size, 1), nn.Linear(self.linear_size, 1), # nn.Linear(self.linear_size, 1), # nn.Linear(self.linear_size, 1), # nn.Linear(self.linear_size, 1), # nn.Linear(self.linear_size, 1), # nn.Linear(self.linear_size, 1), # nn.Linear(self.linear_size, 1) ]) def forward(self, x, attn_mask=None, head_mask=None, rank_prob=None, question_ends_mask=None): all_hidden_states = () all_attentions = () hidden_states = x bsz, ori_num_items, dim = hidden_states.size() seq_mask = attn_mask.to(dtype=next(self.parameters()).dtype) num_items = hidden_states.size(1) tot_zoom = None tot_select_loss = 0 Ls = [] w = 0 mid = len(self.layer)//2 loss = 0 for i, layer_module in enumerate(self.layer): if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if (i in [1, mid]): h1 = gelu(self.linear_1[w](hidden_states)) h2 = self.linear_2[w](h1).squeeze(-1) if question_ends_mask is None: prob = torch.sigmoid(h2) # temp_loss = MSE(prob, rank_prob[:,w,:]) * seq_mask temp_loss = torch.norm((prob - rank_prob[:,w,:]) * seq_mask, p=1) loss += temp_loss / torch.sum(seq_mask) else: h2 = h2.view(-1, 4, num_items) h2 = torch.mean(h2, dim=1) prob = torch.sigmoid(h2) temp_loss = torch.norm((prob - rank_prob[:,w,:]) * question_ends_mask, p=1) loss += temp_loss / torch.sum(question_ends_mask) layer_outputs = layer_module( hidden_states, attn_mask, head_mask[i], ) hidden_states = layer_outputs[0] outputs = (hidden_states, loss) return outputs # INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL # class DistilBertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = DistilBertConfig pretrained_model_archive_map = DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP load_tf_weights = None base_model_prefix = "distilbert" def _init_weights(self, module): """ Initialize the weights. 
""" if isinstance(module, nn.Embedding): if module.weight.requires_grad: module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() DISTILBERT_START_DOCSTRING = r""" This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~transformers.DistilBertConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ DISTILBERT_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`transformers.DistilBertTokenizer`. See :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.encode_plus` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. `What are attention masks? <../glossary.html#attention-mask>`__ head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: :obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**. inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. """ @add_start_docstrings( "The bare DistilBERT encoder/transformer outputting raw hidden-states without any specific head on top.", DISTILBERT_START_DOCSTRING, ) class InitDistilBertModel(DistilBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.embeddings = Embeddings(config) # Embeddings self.transformer = Transformer(config) # Encoder self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, new_embeddings): self.embeddings.word_embeddings = new_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.transformer.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_callable(DISTILBERT_INPUTS_DOCSTRING)
3
2023-10-15 02:31:09+00:00
24k
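One other piece of the HGERE record above runs on its own: `create_sinusoidal_embeddings`, which fills the position-embedding table with fixed sin/cos values when `config.sinusoidal_pos_embds` is set. Below it is reproduced verbatim with a small demo; the 512x64 table size is an arbitrary choice for the demo, not a model default:

```python
import numpy as np
import torch


def create_sinusoidal_embeddings(n_pos, dim, out):
    # sin on even dimensions, cos on odd dimensions, frequency falling with j
    position_enc = np.array(
        [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)]
         for pos in range(n_pos)]
    )
    out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2]))
    out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2]))
    out.detach_()
    out.requires_grad = False


weights = torch.empty(512, 64)
create_sinusoidal_embeddings(n_pos=512, dim=64, out=weights)
print(weights[0, :4])          # position 0 -> sin(0)=0, cos(0)=1, alternating
print(weights.requires_grad)   # False: the table stays frozen during training
```

Freezing the table (`detach_()` plus `requires_grad = False`) is what distinguishes this path from the default learned `nn.Embedding` weights it overwrites in `Embeddings.__init__`.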
akashgreninja/GreSec
backend/venv/lib/python3.10/site-packages/pydantic/type_adapter.py
[ { "identifier": "_config", "path": "backend/venv/lib/python3.10/site-packages/pydantic/_internal/_config.py", "snippet": "DEPRECATION_MESSAGE = 'Support for class-based `config` is deprecated, use ConfigDict instead.'\nV2_REMOVED_KEYS = {\n 'allow_mutation',\n 'error_msg_templates',\n 'fields',...
import sys from dataclasses import is_dataclass from typing import TYPE_CHECKING, Any, Dict, Generic, Iterable, Set, TypeVar, Union, overload from pydantic_core import CoreSchema, SchemaSerializer, SchemaValidator, Some from typing_extensions import Literal, is_typeddict from pydantic.errors import PydanticUserError from pydantic.main import BaseModel from ._internal import _config, _core_utils, _discriminated_union, _generate_schema, _typing_extra from .config import ConfigDict from .json_schema import ( DEFAULT_REF_TEMPLATE, GenerateJsonSchema, JsonSchemaKeyT, JsonSchemaMode, JsonSchemaValue, ) from .plugin._schema_validator import create_schema_validator
15,770
def validate_strings(self, __obj: Any, *, strict: bool | None = None, context: dict[str, Any] | None = None) -> T: """Validate object contains string data against the model. Args: __obj: The object contains string data to validate. strict: Whether to strictly check types. context: Additional context to use during validation. Returns: The validated object. """ return self.validator.validate_strings(__obj, strict=strict, context=context) def get_default_value(self, *, strict: bool | None = None, context: dict[str, Any] | None = None) -> Some[T] | None: """Get the default value for the wrapped type. Args: strict: Whether to strictly check types. context: Additional context to pass to the validator. Returns: The default value wrapped in a `Some` if there is one or None if not. """ return self.validator.get_default_value(strict=strict, context=context) def dump_python( self, __instance: T, *, mode: Literal['json', 'python'] = 'python', include: IncEx | None = None, exclude: IncEx | None = None, by_alias: bool = False, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, round_trip: bool = False, warnings: bool = True, ) -> Any: """Dump an instance of the adapted type to a Python object. Args: __instance: The Python object to serialize. mode: The output format. include: Fields to include in the output. exclude: Fields to exclude from the output. by_alias: Whether to use alias names for field names. exclude_unset: Whether to exclude unset fields. exclude_defaults: Whether to exclude fields with default values. exclude_none: Whether to exclude fields with None values. round_trip: Whether to output the serialized data in a way that is compatible with deserialization. warnings: Whether to display serialization warnings. Returns: The serialized object. """ return self.serializer.to_python( __instance, mode=mode, by_alias=by_alias, include=include, exclude=exclude, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, exclude_none=exclude_none, round_trip=round_trip, warnings=warnings, ) def dump_json( self, __instance: T, *, indent: int | None = None, include: IncEx | None = None, exclude: IncEx | None = None, by_alias: bool = False, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, round_trip: bool = False, warnings: bool = True, ) -> bytes: """Serialize an instance of the adapted type to JSON. Args: __instance: The instance to be serialized. indent: Number of spaces for JSON indentation. include: Fields to include. exclude: Fields to exclude. by_alias: Whether to use alias names for field names. exclude_unset: Whether to exclude unset fields. exclude_defaults: Whether to exclude fields with default values. exclude_none: Whether to exclude fields with a value of `None`. round_trip: Whether to serialize and deserialize the instance to ensure round-tripping. warnings: Whether to emit serialization warnings. Returns: The JSON representation of the given instance as bytes. """ return self.serializer.to_json( __instance, indent=indent, include=include, exclude=exclude, by_alias=by_alias, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, exclude_none=exclude_none, round_trip=round_trip, warnings=warnings, ) def json_schema( self, *, by_alias: bool = True, ref_template: str = DEFAULT_REF_TEMPLATE, schema_generator: type[GenerateJsonSchema] = GenerateJsonSchema,
""" You may have types that are not `BaseModel`s that you want to validate data against. Or you may want to validate a `List[SomeModel]`, or dump it to JSON. For use cases like this, Pydantic provides [`TypeAdapter`][pydantic.type_adapter.TypeAdapter], which can be used for type validation, serialization, and JSON schema generation without creating a [`BaseModel`][pydantic.main.BaseModel]. A [`TypeAdapter`][pydantic.type_adapter.TypeAdapter] instance exposes some of the functionality from [`BaseModel`][pydantic.main.BaseModel] instance methods for types that do not have such methods (such as dataclasses, primitive types, and more): ```py from typing import List from typing_extensions import TypedDict from pydantic import TypeAdapter, ValidationError class User(TypedDict): name: str id: int UserListValidator = TypeAdapter(List[User]) print(repr(UserListValidator.validate_python([{'name': 'Fred', 'id': '3'}]))) #> [{'name': 'Fred', 'id': 3}] try: UserListValidator.validate_python( [{'name': 'Fred', 'id': 'wrong', 'other': 'no'}] ) except ValidationError as e: print(e) ''' 1 validation error for list[typed-dict] 0.id Input should be a valid integer, unable to parse string as an integer [type=int_parsing, input_value='wrong', input_type=str] ''' ``` Note: Despite some overlap in use cases with [`RootModel`][pydantic.root_model.RootModel], [`TypeAdapter`][pydantic.type_adapter.TypeAdapter] should not be used as a type annotation for specifying fields of a `BaseModel`, etc. ## Parsing data into a specified type [`TypeAdapter`][pydantic.type_adapter.TypeAdapter] can be used to apply the parsing logic to populate Pydantic models in a more ad-hoc way. This function behaves similarly to [`BaseModel.model_validate`][pydantic.main.BaseModel.model_validate], but works with arbitrary Pydantic-compatible types. This is especially useful when you want to parse results into a type that is not a direct subclass of [`BaseModel`][pydantic.main.BaseModel]. For example: ```py from typing import List from pydantic import BaseModel, TypeAdapter class Item(BaseModel): id: int name: str # `item_data` could come from an API call, eg., via something like: # item_data = requests.get('https://my-api.com/items').json() item_data = [{'id': 1, 'name': 'My Item'}] items = TypeAdapter(List[Item]).validate_python(item_data) print(items) #> [Item(id=1, name='My Item')] ``` [`TypeAdapter`][pydantic.type_adapter.TypeAdapter] is capable of parsing data into any of the types Pydantic can handle as fields of a [`BaseModel`][pydantic.main.BaseModel]. """ # noqa: D212 from __future__ import annotations as _annotations T = TypeVar('T') if TYPE_CHECKING: # should be `set[int] | set[str] | dict[int, IncEx] | dict[str, IncEx] | None`, but mypy can't cope IncEx = Union[Set[int], Set[str], Dict[int, Any], Dict[str, Any]] def _get_schema(type_: Any, config_wrapper: _config.ConfigWrapper, parent_depth: int) -> CoreSchema: """`BaseModel` uses its own `__module__` to find out where it was defined and then look for symbols to resolve forward references in those globals. On the other hand this function can be called with arbitrary objects, including type aliases where `__module__` (always `typing.py`) is not useful. So instead we look at the globals in our parent stack frame. This works for the case where this function is called in a module that has the target of forward references in its scope, but does not work for more complex cases. 
For example, take the following: a.py ```python from typing import Dict, List IntList = List[int] OuterDict = Dict[str, 'IntList'] ``` b.py ```python test="skip" from a import OuterDict from pydantic import TypeAdapter IntList = int # replaces the symbol the forward reference is looking for v = TypeAdapter(OuterDict) v({'x': 1}) # should fail but doesn't ``` If OuterDict were a `BaseModel`, this would work because it would resolve the forward reference within the `a.py` namespace. But `TypeAdapter(OuterDict)` can't know what module OuterDict came from. In other words, the assumption that _all_ forward references exist in the module we are being called from is not technically always true. Although most of the time it is and it works fine for recursive models and such, `BaseModel`'s behavior isn't perfect either and _can_ break in similar ways, so there is no right or wrong between the two. But at the very least this behavior is _subtly_ different from `BaseModel`'s. """ local_ns = _typing_extra.parent_frame_namespace(parent_depth=parent_depth) global_ns = sys._getframe(max(parent_depth - 1, 1)).f_globals.copy() global_ns.update(local_ns or {}) gen = _generate_schema.GenerateSchema(config_wrapper, types_namespace=global_ns, typevars_map={}) schema = gen.generate_schema(type_) schema = gen.collect_definitions(schema) return schema def _getattr_no_parents(obj: Any, attribute: str) -> Any: """Returns the attribute value without attempting to look up attributes from parent types.""" if hasattr(obj, '__dict__'): try: return obj.__dict__[attribute] except KeyError: pass slots = getattr(obj, '__slots__', None) if slots is not None and attribute in slots: return getattr(obj, attribute) else: raise AttributeError(attribute) class TypeAdapter(Generic[T]): """Type adapters provide a flexible way to perform validation and serialization based on a Python type. A `TypeAdapter` instance exposes some of the functionality from `BaseModel` instance methods for types that do not have such methods (such as dataclasses, primitive types, and more). Note that `TypeAdapter` is not an actual type, so you cannot use it in type annotations. Attributes: core_schema: The core schema for the type. validator (SchemaValidator): The schema validator for the type. serializer: The schema serializer for the type. """ if TYPE_CHECKING: @overload def __new__(cls, __type: type[T], *, config: ConfigDict | None = ...) -> TypeAdapter[T]: ... # this overload is for non-type things like Union[int, str] # Pyright currently handles this "correctly", but MyPy understands this as TypeAdapter[object] # so an explicit type cast is needed @overload def __new__(cls, __type: T, *, config: ConfigDict | None = ...) -> TypeAdapter[T]: ... def __new__(cls, __type: Any, *, config: ConfigDict | None = ...) -> TypeAdapter[T]: """A class representing the type adapter.""" raise NotImplementedError @overload def __init__(self, type: type[T], *, config: ConfigDict | None = None, _parent_depth: int = 2) -> None: ... # this overload is for non-type things like Union[int, str] # Pyright currently handles this "correctly", but MyPy understands this as TypeAdapter[object] # so an explicit type cast is needed @overload def __init__(self, type: T, *, config: ConfigDict | None = None, _parent_depth: int = 2) -> None: ... 
    def __init__(self, type: Any, *, config: ConfigDict | None = None, _parent_depth: int = 2) -> None:
        """Initializes the TypeAdapter object."""
        config_wrapper = _config.ConfigWrapper(config)

        try:
            type_has_config = issubclass(type, BaseModel) or is_dataclass(type) or is_typeddict(type)
        except TypeError:
            # type is not a class
            type_has_config = False

        if type_has_config and config is not None:
            raise PydanticUserError(
                'Cannot use `config` when the type is a BaseModel, dataclass or TypedDict.'
                ' These types can have their own config and setting the config via the `config`'
                ' parameter to TypeAdapter will not override it, thus the `config` you passed to'
                ' TypeAdapter becomes meaningless, which is probably not what you want.',
                code='type-adapter-config-unused',
            )

        core_schema: CoreSchema
        try:
            core_schema = _getattr_no_parents(type, '__pydantic_core_schema__')
        except AttributeError:
            core_schema = _get_schema(type, config_wrapper, parent_depth=_parent_depth + 1)

        core_schema = _discriminated_union.apply_discriminators(_core_utils.simplify_schema_references(core_schema))
        core_schema = _core_utils.validate_core_schema(core_schema)

        core_config = config_wrapper.core_config(None)
        validator: SchemaValidator
        try:
            validator = _getattr_no_parents(type, '__pydantic_validator__')
        except AttributeError:
            validator = create_schema_validator(core_schema, core_config, config_wrapper.plugin_settings)

        serializer: SchemaSerializer
        try:
            serializer = _getattr_no_parents(type, '__pydantic_serializer__')
        except AttributeError:
            serializer = SchemaSerializer(core_schema, core_config)

        self.core_schema = core_schema
        self.validator = validator
        self.serializer = serializer

    def validate_python(
        self,
        __object: Any,
        *,
        strict: bool | None = None,
        from_attributes: bool | None = None,
        context: dict[str, Any] | None = None,
    ) -> T:
        """Validate a Python object against the model.

        Args:
            __object: The Python object to validate against the model.
            strict: Whether to strictly check types.
            from_attributes: Whether to extract data from object attributes.
            context: Additional context to pass to the validator.

        Returns:
            The validated object.
        """
        return self.validator.validate_python(__object, strict=strict, from_attributes=from_attributes, context=context)

    def validate_json(
        self, __data: str | bytes, *, strict: bool | None = None, context: dict[str, Any] | None = None
    ) -> T:
        """Validate a JSON string or bytes against the model.

        Args:
            __data: The JSON data to validate against the model.
            strict: Whether to strictly check types.
            context: Additional context to use during validation.

        Returns:
            The validated object.
        """
        return self.validator.validate_json(__data, strict=strict, context=context)

    def validate_strings(self, __obj: Any, *, strict: bool | None = None, context: dict[str, Any] | None = None) -> T:
        """Validate an object containing string data against the model.

        Args:
            __obj: The object containing string data to validate.
            strict: Whether to strictly check types.
            context: Additional context to use during validation.

        Returns:
            The validated object.
        """
        return self.validator.validate_strings(__obj, strict=strict, context=context)

    def get_default_value(self, *, strict: bool | None = None, context: dict[str, Any] | None = None) -> Some[T] | None:
        """Get the default value for the wrapped type.

        Args:
            strict: Whether to strictly check types.
            context: Additional context to pass to the validator.

        Returns:
            The default value wrapped in a `Some` if there is one or None if not.
""" return self.validator.get_default_value(strict=strict, context=context) def dump_python( self, __instance: T, *, mode: Literal['json', 'python'] = 'python', include: IncEx | None = None, exclude: IncEx | None = None, by_alias: bool = False, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, round_trip: bool = False, warnings: bool = True, ) -> Any: """Dump an instance of the adapted type to a Python object. Args: __instance: The Python object to serialize. mode: The output format. include: Fields to include in the output. exclude: Fields to exclude from the output. by_alias: Whether to use alias names for field names. exclude_unset: Whether to exclude unset fields. exclude_defaults: Whether to exclude fields with default values. exclude_none: Whether to exclude fields with None values. round_trip: Whether to output the serialized data in a way that is compatible with deserialization. warnings: Whether to display serialization warnings. Returns: The serialized object. """ return self.serializer.to_python( __instance, mode=mode, by_alias=by_alias, include=include, exclude=exclude, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, exclude_none=exclude_none, round_trip=round_trip, warnings=warnings, ) def dump_json( self, __instance: T, *, indent: int | None = None, include: IncEx | None = None, exclude: IncEx | None = None, by_alias: bool = False, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, round_trip: bool = False, warnings: bool = True, ) -> bytes: """Serialize an instance of the adapted type to JSON. Args: __instance: The instance to be serialized. indent: Number of spaces for JSON indentation. include: Fields to include. exclude: Fields to exclude. by_alias: Whether to use alias names for field names. exclude_unset: Whether to exclude unset fields. exclude_defaults: Whether to exclude fields with default values. exclude_none: Whether to exclude fields with a value of `None`. round_trip: Whether to serialize and deserialize the instance to ensure round-tripping. warnings: Whether to emit serialization warnings. Returns: The JSON representation of the given instance as bytes. """ return self.serializer.to_json( __instance, indent=indent, include=include, exclude=exclude, by_alias=by_alias, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, exclude_none=exclude_none, round_trip=round_trip, warnings=warnings, ) def json_schema( self, *, by_alias: bool = True, ref_template: str = DEFAULT_REF_TEMPLATE, schema_generator: type[GenerateJsonSchema] = GenerateJsonSchema,
mode: JsonSchemaMode = 'validation',
6
2023-10-23 18:09:28+00:00
24k
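For quick reference, here is a minimal, self-contained sketch of the `TypeAdapter` workflow that this record's code implements (validation, serialization, and JSON schema generation). It assumes pydantic v2; the `Dict[str, List[int]]` type and the sample data are illustrative, not taken from the record:

```python
from typing import Dict, List

from pydantic import TypeAdapter

adapter = TypeAdapter(Dict[str, List[int]])

# validate_python coerces compatible inputs (numeric strings become ints).
value = adapter.validate_python({"a": ["1", 2]})
print(value)  # {'a': [1, 2]}

# dump_json mirrors BaseModel.model_dump_json for arbitrary adapted types.
print(adapter.dump_json(value))  # b'{"a":[1,2]}'

# JSON schema generation works without defining a BaseModel.
print(adapter.json_schema()["type"])  # object
```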
zju3dv/nr_in_a_room
data_gen/batch_real_scene_neural_render.py
[ { "identifier": "read_json", "path": "utils/util.py", "snippet": "def read_json(fname):\n fname = Path(fname)\n with fname.open(\"rt\") as handle:\n return json.load(handle, object_hook=OrderedDict)" }, { "identifier": "read_yaml", "path": "utils/util.py", "snippet": "def re...
import sys import os import torch import numpy as np import imageio import time import cv2 from tqdm import tqdm from argparse import ArgumentParser from utils.util import read_json, read_yaml from optim.room_optimizer import RoomOptimizer from optim.misc_utils import read_real_scene_localization, read_testing_config from scipy.spatial.transform import Rotation
14,984
os.environ["OMP_NUM_THREADS"] = "1" # noqa os.environ["MKL_NUM_THREADS"] = "1" # noqa sys.path.append(".") # noqa def render_frame(config, target_dir): # or load from config active_instance_id = config.active_instance_id dataset_config = config.dataset_config["dataset"] scene_info_json_path = config.scene_info_json active_instance_id = [0] for obj_info in read_json(scene_info_json_path)["objs"]: active_instance_id += [obj_info["id"]] bg_scale_factor = 1 bg_scene_center = [0, 0, 0] if config.bg_dataset_config != "": bg_dataset_config = config.bg_dataset_config["dataset"] bg_scale_factor = bg_dataset_config["scale_factor"] bg_scene_center = bg_dataset_config["scene_center"] # intialize room optimizer room_optimizer = RoomOptimizer( scene_info_json_path=scene_info_json_path, scale_factor=dataset_config["scale_factor"], scale_factor_dict=dataset_config.get("scale_factor_dict", {}), bg_scale_factor=bg_scale_factor, bg_scene_center=bg_scene_center, img_wh=config.img_wh, near=0.3, far=10.0, chunk=config.chunk, model_ckpt_path_dict=config.ckpt_path_dict, active_instance_id=active_instance_id, use_amp=True, use_light_from_image_attr=True, # we use fixed light code (e.g. probe_03) optimize_appearance_code=config.get("optimize_appearance_code", False), use_appearance_from_image_attr=True, ) # initialize object poses with no noise room_optimizer.set_initial_object_poses_from_scene_meta(add_noise=False) # we show an example to use pose scene_meta = read_json(scene_info_json_path) # localization_info = read_real_scene_localization( # "/mnt/nas_54/group/BBYang/neural_scene_capture_360/capture_1104/processed/arrangement_panorama_select/arrangement1/traj.txt", # "data/real_room_0/objects/000/background_hloc_neus_normal_converge/transform_info.json", # ) pose = np.array(scene_meta["camera"]["cam3d2world"]).reshape(4, 4) # Original poses has rotation in form "right down forward", change to NDC "right up back" fix_rot = np.array([1, 0, 0, 0, -1, 0, 0, 0, -1]).reshape(3, 3) pose[:3, :3] = pose[:3, :3] @ fix_rot # from scipy.spatial.transform import Rotation as R # print(pose) # rot_fix_loc = np.array([0, 1, 0, 1, 0, 0, 0, 0, -1]).reshape(3, 3) # pose[:3, :3] = pose[:3, :3] @ rot_fix_loc # pose[:3, :3] = rot_fix_loc @ pose[:3, :3] t1 = time.time() # image_np = room_optimizer.render_full_scene( # pose=pose, # idx=-1, # return_raw_image=True, # refine_edge=True, # # use_sphere_tracing=False, # use_sphere_tracing=True, # ) image_np, mask_np = room_optimizer.render_full_scene( pose=pose, idx=-1, return_raw_image=True, refine_edge=False, # refine_edge=True, # use_sphere_tracing=False, use_sphere_tracing=True, render_mask=True, ) t2 = time.time() print(f"Rendering finish in {t2-t1} s.") os.makedirs("debug", exist_ok=True) imageio.imwrite(f"{target_dir}/rgb.png", image_np) cv2.imwrite(f"{target_dir}/seg.png", mask_np) if __name__ == "__main__": """ Example: python test/test_neural_scene_renderer.py \ config=test/config/ig_bedroom.yml \ "img_wh=[1024,512]" \ base_dir=data/real_room_rand_arrangement """
os.environ["OMP_NUM_THREADS"] = "1" # noqa os.environ["MKL_NUM_THREADS"] = "1" # noqa sys.path.append(".") # noqa def render_frame(config, target_dir): # or load from config active_instance_id = config.active_instance_id dataset_config = config.dataset_config["dataset"] scene_info_json_path = config.scene_info_json active_instance_id = [0] for obj_info in read_json(scene_info_json_path)["objs"]: active_instance_id += [obj_info["id"]] bg_scale_factor = 1 bg_scene_center = [0, 0, 0] if config.bg_dataset_config != "": bg_dataset_config = config.bg_dataset_config["dataset"] bg_scale_factor = bg_dataset_config["scale_factor"] bg_scene_center = bg_dataset_config["scene_center"] # intialize room optimizer room_optimizer = RoomOptimizer( scene_info_json_path=scene_info_json_path, scale_factor=dataset_config["scale_factor"], scale_factor_dict=dataset_config.get("scale_factor_dict", {}), bg_scale_factor=bg_scale_factor, bg_scene_center=bg_scene_center, img_wh=config.img_wh, near=0.3, far=10.0, chunk=config.chunk, model_ckpt_path_dict=config.ckpt_path_dict, active_instance_id=active_instance_id, use_amp=True, use_light_from_image_attr=True, # we use fixed light code (e.g. probe_03) optimize_appearance_code=config.get("optimize_appearance_code", False), use_appearance_from_image_attr=True, ) # initialize object poses with no noise room_optimizer.set_initial_object_poses_from_scene_meta(add_noise=False) # we show an example to use pose scene_meta = read_json(scene_info_json_path) # localization_info = read_real_scene_localization( # "/mnt/nas_54/group/BBYang/neural_scene_capture_360/capture_1104/processed/arrangement_panorama_select/arrangement1/traj.txt", # "data/real_room_0/objects/000/background_hloc_neus_normal_converge/transform_info.json", # ) pose = np.array(scene_meta["camera"]["cam3d2world"]).reshape(4, 4) # Original poses has rotation in form "right down forward", change to NDC "right up back" fix_rot = np.array([1, 0, 0, 0, -1, 0, 0, 0, -1]).reshape(3, 3) pose[:3, :3] = pose[:3, :3] @ fix_rot # from scipy.spatial.transform import Rotation as R # print(pose) # rot_fix_loc = np.array([0, 1, 0, 1, 0, 0, 0, 0, -1]).reshape(3, 3) # pose[:3, :3] = pose[:3, :3] @ rot_fix_loc # pose[:3, :3] = rot_fix_loc @ pose[:3, :3] t1 = time.time() # image_np = room_optimizer.render_full_scene( # pose=pose, # idx=-1, # return_raw_image=True, # refine_edge=True, # # use_sphere_tracing=False, # use_sphere_tracing=True, # ) image_np, mask_np = room_optimizer.render_full_scene( pose=pose, idx=-1, return_raw_image=True, refine_edge=False, # refine_edge=True, # use_sphere_tracing=False, use_sphere_tracing=True, render_mask=True, ) t2 = time.time() print(f"Rendering finish in {t2-t1} s.") os.makedirs("debug", exist_ok=True) imageio.imwrite(f"{target_dir}/rgb.png", image_np) cv2.imwrite(f"{target_dir}/seg.png", mask_np) if __name__ == "__main__": """ Example: python test/test_neural_scene_renderer.py \ config=test/config/ig_bedroom.yml \ "img_wh=[1024,512]" \ base_dir=data/real_room_rand_arrangement """
config = read_testing_config()
4
2023-10-15 08:41:29+00:00
24k
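The key numerical step in the record above is the camera-convention fix: right-multiplying the camera-to-world rotation by `fix_rot` negates its second and third columns, turning the [right, down, forward] camera axes into the NDC-style [right, up, back]. A small numpy-only sketch of that effect, using a toy identity pose rather than the project's data:

```python
import numpy as np

fix_rot = np.diag([1.0, -1.0, -1.0])  # same matrix as fix_rot above

# Columns of a camera-to-world rotation are the camera axes in world space.
R = np.eye(3)  # toy pose: right=+x, down=+y, forward=+z
R_ndc = R @ fix_rot

print(R_ndc[:, 1])  # [ 0. -1.  0.] -> the "down" column now points up
print(R_ndc[:, 2])  # [ 0.  0. -1.] -> the "forward" column now points back
```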
WenzhengZhang/Seq2seqCoref
main_trainer.py
[ { "identifier": "DataArguments", "path": "arguments.py", "snippet": "class DataArguments:\n data_dir: Optional[str] = field(\n default=None, metadata={\"help\": \"Path to data directory\"}\n )\n\n max_train_len: Optional[int] = field(\n default=1536,\n metadata={\n ...
import logging import os import sys from transformers import HfArgumentParser, set_seed from transformers import AutoModelForSeq2SeqLM, \ DataCollatorForSeq2Seq, AutoConfig, AutoTokenizer from transformers.integrations import TensorBoardCallback from arguments import DataArguments, ModelArguments, CorefTrainingArguments \ as TrainingArguments from data import CorefDataset, JointDataset from constants import SPEAKER_START, SPEAKER_END, MENTION_START, MENTION_END, \ COPY, CLUSTER_NEW, CLUSTERS, SENTENCE_START, SENTENCE_END, SPECIAL_IDS, \ NON_INT_SPECIAL_IDS, MARK_SPECIAL_IDS, MENTION_END_NON_INT_SPECIAL_IDS, \ MENTION_ENDS from trainer import CorefTrainer from data import ConstrainedDataCollator from model import ConstrainedT5
20,328
logger = logging.getLogger(__name__) logger.addHandler(logging.StreamHandler(sys.stdout)) def main(): parser = HfArgumentParser( (ModelArguments, DataArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): model_args, data_args, training_args = parser.parse_json_file( json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() model_args: ModelArguments data_args: DataArguments training_args: TrainingArguments if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, fp16 training: %s, bf16 training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, training_args.bf16, ) logger.info("Training/evaluation parameters %s", training_args) logger.info("MODEL parameters %s", model_args) logger.info("Data arguments %s", data_args) set_seed(training_args.seed) tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path) if training_args.action_type == "integer": num_new_tokens = tokenizer.add_tokens([SPEAKER_START, SPEAKER_END, MENTION_START, MENTION_END, COPY]) elif training_args.action_type == "non_integer": if training_args.add_mention_end: num_new_tokens = tokenizer.add_tokens([SPEAKER_START, SPEAKER_END, MENTION_START, MENTION_END, COPY, CLUSTER_NEW] +
CLUSTERS)
11
2023-10-17 17:39:16+00:00
24k
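The training script above grows the tokenizer vocabulary with task-specific special tokens; whenever that is done, the model's embedding matrix must be resized to match. A generic sketch of that pattern with the `transformers` API (the `t5-small` checkpoint and the tag strings are illustrative placeholders, not the repository's exact constants):

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

# add_tokens returns how many strings were actually new to the vocabulary.
num_new_tokens = tokenizer.add_tokens(["<speaker>", "</speaker>", "<m>", "</m>"])

# The embedding table must cover the newly assigned token ids.
model.resize_token_embeddings(len(tokenizer))
print(num_new_tokens, len(tokenizer))
```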
giulio98/functional-diffusion-processes
src/functional_diffusion_processes/trainers/trainer.py
[ { "identifier": "AudioDataset", "path": "src/functional_diffusion_processes/datasets/audio_dataset.py", "snippet": "class AudioDataset(BaseDataset, abc.ABC):\n \"\"\"Base class for defining audio datasets.\n\n This class serves as the foundation for defining datasets containing audio data.\n It...
import abc import gc import io import logging import os import flax import flax.jax_utils as flax_utils import hydra.utils import jax import numpy as np import tensorflow as tf import wandb from typing import Any, Callable, Tuple, Union from cleanfid import fid from flax import linen, traverse_util from flax.training import checkpoints from flax.training.checkpoints import restore_checkpoint from jax import numpy as jnp from omegaconf import DictConfig, OmegaConf from tqdm.auto import tqdm from wandb.sdk.lib import RunDisabled from wandb.sdk.wandb_run import Run from ..datasets import AudioDataset, ImageDataset from ..datasets.base_dataset import BaseDataset from ..losses.base_loss import Loss from ..metrics import FIDMetric from ..samplers import Sampler from ..sdetools.base_sde import SDE from ..utils.common import filter_mask, make_grid_image, process_images, save_samples, to_grayscale from ..utils.scaler import get_data_inverse_scaler, get_data_scaler from ..utils.training_state import TrainState from .helpers import colorizing_fn, construct_sampling_fn, construct_train_step, inpainting_fn, sampling_fn
14,855
): self.save_checkpoint(step, run, state) # Evaluate the model if self.training_config.sampling and (step % self.training_config.eval_freq == 0): # if step != 0: if jax.host_id() == 0: pylogger.info("Generating samples at step %d." % (step,)) _, *sample_rng = jax.random.split(rng, jax.local_device_count() + 1) _, b, g, c = batch.shape sample_rng = jnp.asarray(sample_rng) if self.training_config.sampling_type == "full": batch_sampled, batch_sampled_last, batch_sampled_all = sampling_fn( sample_fn, (sample_rng, state), p_batch_input ) elif self.training_config.sampling_type == "colorization": batch_grayscale = to_grayscale(batch) batch_grayscale = batch_grayscale.reshape(-1, b, g, 1) batch_sampled, batch_sampled_last, batch_sampled_all = colorizing_fn( sample_fn, (sample_rng, state), p_batch_input, batch_grayscale ) elif self.training_config.sampling_type == "inpainting": config_object = OmegaConf.create( { "_target_": "functional_diffusion_processes.datasets.mnist_dataset.MNISTDataset", "data_config": { "seed": 42, "batch_size": ds_train.data_config.batch_size, "image_height_size": ds_train.data_config.image_height_size, "image_width_size": ds_train.data_config.image_width_size, "output_size": 1, "random_flip": False, "uniform_dequantization": False, "data_centered": False, "data_dir": "${oc.env:DATA_ROOT}/tensorflow_datasets", "download": True, "is_mask": True, }, "split": "train", "evaluation": False, } ) ds_mask = hydra.utils.instantiate(config_object, _recursive_=False) ds_mask_iter = iter(ds_mask) batch_masked = jax.tree_map(f=lambda x: x._numpy(), tree=next(ds_mask_iter)["data"]) batch_sampled, batch_sampled_last, batch_sampled_all = inpainting_fn( sample_fn, (sample_rng, state), p_batch_input, (batch * batch_masked), batch_masked ) elif self.training_config.sampling_type == "deblurring": n_rows, n_cols = ds_train.data_config.image_height_size, ds_train.data_config.image_width_size batch_masked = filter_mask(batch.reshape(-1, b, n_rows, n_cols, c).shape, radius=10) batch_freq = jnp.fft.fftshift( jnp.fft.fft2(batch.reshape(-1, b, n_rows, n_cols, c), axes=(2, 3)), axes=(2, 3), ) batch_freq = batch_freq * batch_masked batch_blurred = jnp.real(jnp.fft.ifft2(jnp.fft.ifftshift(batch_freq, axes=(2, 3)), axes=(2, 3))) batch_blurred = batch_blurred.reshape(-1, b, g, c) batch_masked = batch_masked.reshape(-1, b, g, c) batch_sampled, batch_sampled_last, batch_sampled_all = inpainting_fn( sample_fn, (sample_rng, state), p_batch_input, batch_blurred, batch_masked ) if jax.host_id() == 0 and self.logging.use_wandb: if isinstance(ds_train, ImageDataset): this_sample_dir = os.path.join( self.sample_dir, "iter_{}_host_{}".format(step, jax.host_id()), ) tf.io.gfile.makedirs(this_sample_dir) # code below to show the gif of the sampled images # processed_images = [] # for n in range(batch_sampled_all.shape[1]): # batch_sampled_i = batch_sampled_all[:, n, :, :, :] # batch_sampled_i = ds_train.postprocess_fn( # batch_data=batch_sampled_i, inverse_scaler=inverse_scaler # ) # processed_images.append(np.asarray(batch_sampled_i)) # # # Log the sampled images as a GIF # imageio.mimwrite( # os.path.join(this_sample_dir, "image_sequence.gif"), # processed_images, # fps=10, # ) # gif_wandb = wandb.Image( # os.path.join(this_sample_dir, "image_sequence.gif"), # caption="Sampled_all_gif", # ) # wandb.log({"Sampled_all_gif": gif_wandb}, step=step) batch_sampled = ds_train.postprocess_fn(batch_data=batch_sampled, inverse_scaler=inverse_scaler) batch_sampled_last = ds_train.postprocess_fn( 
batch_data=batch_sampled_last, inverse_scaler=inverse_scaler ) batch_real = ds_train.postprocess_fn( batch_data=batch.reshape(-1, b, g, c), inverse_scaler=inverse_scaler ) if not self.training_config.sampling_only: batch_target = ds_train.postprocess_fn( batch_data=target.reshape(-1, b, g, c), inverse_scaler=inverse_scaler ) if isinstance(ds_train, ImageDataset): data_sampled = wandb.Image(np.asarray(batch_sampled), caption="Sampled") data_sampled_rec = wandb.Image(np.asarray(batch_sampled_last), caption="Sampled Rec") data_real = wandb.Image(np.asarray(batch_real), caption="Real") if not self.training_config.sampling_only: data_target = wandb.Image(np.asarray(batch_target), caption="Target")
# import imageio # import imageio pylogger = logging.getLogger(__name__) class Trainer(abc.ABC): """Class for training a model.""" def __init__( self, mode: str, model_name: str, training_config: DictConfig, optimizer, evaluation_config: DictConfig, trainer_logging: DictConfig, sampler: Sampler, loss_obj: Loss, ) -> None: """Initialize a Trainer instance with configurations and core components. Args: mode (str): Specifies the mode of the trainer which can be either "train" or "eval". model_name (str): The name identifier for the model. training_config (DictConfig): A configuration dictionary for training settings. optimizer: The optimizer instance used for training. evaluation_config (DictConfig): A configuration dictionary for evaluation settings. trainer_logging (DictConfig): A configuration dictionary for logging settings. sampler (Sampler): A sampler instance for sampling from the model. loss_obj (Loss): A loss object used for computing the loss during training. """ self.mode = mode self.model_name = model_name self.training_config = training_config self.optimizer = hydra.utils.instantiate(optimizer) self.evaluation_config = evaluation_config self.logging = trainer_logging self.sampler = sampler self.loss_obj = loss_obj self.checkpoint_dir = os.path.join(self.training_config.save_dir, self.training_config.checkpoint_dir) self.sample_dir = os.path.join(self.training_config.save_dir, self.training_config.sample_dir) self.eval_dir = os.path.join(self.training_config.save_dir, self.evaluation_config.eval_dir) # Create the directories for saving samples and checkpoints tf.io.gfile.makedirs(self.checkpoint_dir) tf.io.gfile.makedirs(self.sample_dir) tf.io.gfile.makedirs(self.eval_dir) tf.io.gfile.makedirs(os.path.join(self.eval_dir, "clean")) def initialize_wandb( self, dataset_config: DictConfig, sde_config: DictConfig, model_config: DictConfig ) -> Union[Run, RunDisabled, None]: """Initialize wandb if logging is enabled.""" if self.logging.use_wandb: run = wandb.init( name=os.path.basename(self.logging.wandb_init.name), project=self.logging.wandb_init.project, entity=self.logging.wandb_init.entity, save_code=self.logging.wandb_init.save_code, config={ **self.training_config, **dataset_config, **sde_config, **model_config, }, ) else: run = None return run def initialize_run(self, model, ds_train, sde): """Perform all initialization steps required for training.""" run = self.initialize_wandb(ds_train.data_config, sde.sde_config, model.model_config) scaler = get_data_scaler(is_centered=ds_train.data_config.data_centered) inverse_scaler = get_data_inverse_scaler(is_centered=ds_train.data_config.data_centered) rng = jax.random.PRNGKey(seed=self.training_config.seed) rng, step_rng = jax.random.split(rng) batch_input = model.initialize_input( (ds_train.data_config.batch_size, *sde.sde_config.shape, ds_train.data_config.output_size) ) params = jax.jit(model.initialize_model, backend="cpu")(step_rng, batch_input) flat_params = traverse_util.flatten_dict(params).values() tot_params = sum([jnp.size(p) for p in flat_params]) pylogger.info("Total number of parameters: {:.2f}M".format(tot_params / 1e6)) state = TrainState.create( apply_fn=model.apply, params=params, tx=self.optimizer, opt_state_params=self.optimizer.init(params), rng=rng, ema_params=params, ) train_step_fn = construct_train_step(self.optimizer, self.loss_obj.construct_loss_fn(model)) sample_fn = construct_sampling_fn(model, self.sampler) # Resume training when intermediate checkpoints are detected if self.training_config.resume_training: 
pylogger.warning("Resuming training from the latest checkpoint.") if self.logging.use_wandb and self.model_name != "local": model_file = wandb.use_artifact(self.model_name).download() state = restore_checkpoint(ckpt_dir=model_file, prefix="checkpoint_", target=state) else: state = checkpoints.restore_checkpoint(ckpt_dir=self.checkpoint_dir, target=state) return run, scaler, inverse_scaler, rng, state, train_step_fn, sample_fn, batch_input def train_step( self, train_step_fn: Callable, carry_state: Tuple, batch: jnp.ndarray, batch_input: jnp.ndarray, ) -> Tuple: """Perform a single training step, updating the model parameters. Args: train_step_fn (Callable): The train step function. carry_state (Tuple): The current state of the model and optimizer. batch (jnp.ndarray): The batch of data used for training. batch_input (jnp.ndarray): The input data to the model. Returns: Tuple: The updated state after performing the training step. """ (rng, state) = carry_state ( new_rng, loss, loss_inner, new_params, new_optim_state, batch_reconstructed, batch_corrupted, target, ) = train_step_fn( rng, state.params, state.opt_state_params, state.step, batch_input, batch, ) ema_rate = self.training_config.ema_rate new_params_ema = jax.tree_map( lambda p_ema, p: p_ema * ema_rate + p * (1.0 - ema_rate), state.ema_params, new_params, ) # update the state new_state = state.replace( rng=flax.jax_utils.unreplicate(new_rng), step=state.step + 1, opt_state_params=new_optim_state, params=new_params, ema_params=new_params_ema, ) new_carry_state = (new_rng, new_state) loss = flax.jax_utils.unreplicate(loss) step = int(flax_utils.unreplicate(state.step)) # Log the training progress if jax.host_id() == 0 and step % self.training_config.log_freq == 0: pylogger.info("step: %d, training_loss: %.5e" % (step, loss)) if self.logging.use_wandb: wandb.log({"step": step, "loss": loss}, step=step) if loss_inner is not None: loss_inner = flax.jax_utils.unreplicate(loss_inner) for inner_step, loss in enumerate(loss_inner): pylogger.info("step: %d, training_loss_inner: %.5e" % (step, loss)) if self.logging.use_wandb: wandb.log({"step": step, f"loss inner step {inner_step}": loss}, step=step) return new_carry_state, batch_reconstructed, batch_corrupted, target def save_checkpoint(self, step, run, state): pylogger.info("Saving the model at step %d." % (step,)) # Log the evaluation progress # Save the model parameters ( params, opt_state_params, step_, ema_params, ) = flax_utils.unreplicate( ( state.params, state.opt_state_params, state.step, state.ema_params, ) ) saved_state = state.replace( step=step_, opt_state_params=opt_state_params, params=params, ema_params=ema_params, ) checkpoint_file = checkpoints.save_checkpoint( self.checkpoint_dir, saved_state, step=step_ // self.training_config.eval_freq, keep=np.inf, ) if self.logging.use_wandb: wandb_model_artifact_name = str(step_) + "_" + run.id wandb_model = wandb.Artifact(wandb_model_artifact_name, type="model") wandb_model.add_file(checkpoint_file) run.log_artifact(wandb_model) # noinspection PyProtectedMember def train(self, model: linen.Module, ds_train: BaseDataset, sde: SDE) -> None: """Train the model with optional evaluation and logging. This method encapsulates the entire training process including initialization, training loop, checkpointing, evaluation, and logging. It supports different sampling types like colorization, inpainting, super resolution, and deblurring. Args: model (linen.Module): The model to be trained. ds_train (BaseDataset): The training dataset. 
sde (SDE): Stochastic differential equation object, governing the dynamics for sampling. Raises: ValueError: If an unsupported dataset type is provided. Note: The method leverages the Weights & Biases (wandb) platform for logging and checkpointing, make sure it's configured properly if logging is enabled. """ run, scaler, inverse_scaler, rng, state, train_step_fn, sample_fn, batch_input = self.initialize_run( model, ds_train, sde ) # `state.step` is JAX integer on the GPU/TPU devices start_step = int(state.step) rng = state.rng # Replicate the train state on all devices ( p_params, p_opt_state_params, p_step, p_ema_params, p_batch_input, ) = flax_utils.replicate( ( state.params, state.opt_state_params, state.step, state.ema_params, batch_input, ) ) # update the TrainState with replicated parameters and optimizer state state = state.replace( params=p_params, opt_state_params=p_opt_state_params, step=p_step, ema_params=p_ema_params, ) if jax.host_id() == 0: pylogger.info("Starting training loop at step %d." % (start_step,)) rng = jax.random.fold_in(rng, jax.host_id()) assert ( self.training_config.log_freq % self.training_config.n_jitted_steps == 0 and self.training_config.eval_freq % self.training_config.n_jitted_steps == 0 ), "Missing logs or checkpoints!" ds_train_iter = iter(ds_train) with tqdm( total=self.training_config.total_steps + 1, initial=start_step, position=0, leave=True, ) as pbar: for step in range( start_step, self.training_config.total_steps + 1, self.training_config.n_jitted_steps, ): # Get the next batch of data and scale it batch = jax.tree_map(f=lambda x: scaler(x._numpy()), tree=next(ds_train_iter)["data"]) if not self.training_config.sampling_only: # Split the random number generator for the current step rng, *next_rng = jax.random.split(key=rng, num=jax.local_device_count() + 1) next_rng = jnp.asarray(next_rng) ((_, state), batch_reconstructed, batch_corrupted, target) = self.train_step( train_step_fn=train_step_fn, carry_state=(next_rng, state), batch=batch, batch_input=p_batch_input, ) if not self.training_config.sampling_only and ( (jax.host_id() == 0 and step % self.training_config.checkpoint_freq == 0 and step != 0) ): self.save_checkpoint(step, run, state) # Evaluate the model if self.training_config.sampling and (step % self.training_config.eval_freq == 0): # if step != 0: if jax.host_id() == 0: pylogger.info("Generating samples at step %d." 
% (step,)) _, *sample_rng = jax.random.split(rng, jax.local_device_count() + 1) _, b, g, c = batch.shape sample_rng = jnp.asarray(sample_rng) if self.training_config.sampling_type == "full": batch_sampled, batch_sampled_last, batch_sampled_all = sampling_fn( sample_fn, (sample_rng, state), p_batch_input ) elif self.training_config.sampling_type == "colorization": batch_grayscale = to_grayscale(batch) batch_grayscale = batch_grayscale.reshape(-1, b, g, 1) batch_sampled, batch_sampled_last, batch_sampled_all = colorizing_fn( sample_fn, (sample_rng, state), p_batch_input, batch_grayscale ) elif self.training_config.sampling_type == "inpainting": config_object = OmegaConf.create( { "_target_": "functional_diffusion_processes.datasets.mnist_dataset.MNISTDataset", "data_config": { "seed": 42, "batch_size": ds_train.data_config.batch_size, "image_height_size": ds_train.data_config.image_height_size, "image_width_size": ds_train.data_config.image_width_size, "output_size": 1, "random_flip": False, "uniform_dequantization": False, "data_centered": False, "data_dir": "${oc.env:DATA_ROOT}/tensorflow_datasets", "download": True, "is_mask": True, }, "split": "train", "evaluation": False, } ) ds_mask = hydra.utils.instantiate(config_object, _recursive_=False) ds_mask_iter = iter(ds_mask) batch_masked = jax.tree_map(f=lambda x: x._numpy(), tree=next(ds_mask_iter)["data"]) batch_sampled, batch_sampled_last, batch_sampled_all = inpainting_fn( sample_fn, (sample_rng, state), p_batch_input, (batch * batch_masked), batch_masked ) elif self.training_config.sampling_type == "deblurring": n_rows, n_cols = ds_train.data_config.image_height_size, ds_train.data_config.image_width_size batch_masked = filter_mask(batch.reshape(-1, b, n_rows, n_cols, c).shape, radius=10) batch_freq = jnp.fft.fftshift( jnp.fft.fft2(batch.reshape(-1, b, n_rows, n_cols, c), axes=(2, 3)), axes=(2, 3), ) batch_freq = batch_freq * batch_masked batch_blurred = jnp.real(jnp.fft.ifft2(jnp.fft.ifftshift(batch_freq, axes=(2, 3)), axes=(2, 3))) batch_blurred = batch_blurred.reshape(-1, b, g, c) batch_masked = batch_masked.reshape(-1, b, g, c) batch_sampled, batch_sampled_last, batch_sampled_all = inpainting_fn( sample_fn, (sample_rng, state), p_batch_input, batch_blurred, batch_masked ) if jax.host_id() == 0 and self.logging.use_wandb: if isinstance(ds_train, ImageDataset): this_sample_dir = os.path.join( self.sample_dir, "iter_{}_host_{}".format(step, jax.host_id()), ) tf.io.gfile.makedirs(this_sample_dir) # code below to show the gif of the sampled images # processed_images = [] # for n in range(batch_sampled_all.shape[1]): # batch_sampled_i = batch_sampled_all[:, n, :, :, :] # batch_sampled_i = ds_train.postprocess_fn( # batch_data=batch_sampled_i, inverse_scaler=inverse_scaler # ) # processed_images.append(np.asarray(batch_sampled_i)) # # # Log the sampled images as a GIF # imageio.mimwrite( # os.path.join(this_sample_dir, "image_sequence.gif"), # processed_images, # fps=10, # ) # gif_wandb = wandb.Image( # os.path.join(this_sample_dir, "image_sequence.gif"), # caption="Sampled_all_gif", # ) # wandb.log({"Sampled_all_gif": gif_wandb}, step=step) batch_sampled = ds_train.postprocess_fn(batch_data=batch_sampled, inverse_scaler=inverse_scaler) batch_sampled_last = ds_train.postprocess_fn( batch_data=batch_sampled_last, inverse_scaler=inverse_scaler ) batch_real = ds_train.postprocess_fn( batch_data=batch.reshape(-1, b, g, c), inverse_scaler=inverse_scaler ) if not self.training_config.sampling_only: batch_target = ds_train.postprocess_fn( 
batch_data=target.reshape(-1, b, g, c), inverse_scaler=inverse_scaler ) if isinstance(ds_train, ImageDataset): data_sampled = wandb.Image(np.asarray(batch_sampled), caption="Sampled") data_sampled_rec = wandb.Image(np.asarray(batch_sampled_last), caption="Sampled Rec") data_real = wandb.Image(np.asarray(batch_real), caption="Real") if not self.training_config.sampling_only: data_target = wandb.Image(np.asarray(batch_target), caption="Target")
elif isinstance(ds_train, AudioDataset):
0
2023-10-24 22:01:35+00:00
24k
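One detail worth noting in the `train_step` above is the exponential moving average (EMA) of the parameters, applied leaf-wise over the parameter pytree. A minimal runnable sketch of that update, with illustrative shapes and an assumed `ema_rate` of 0.999 (the trainer reads the real value from `training_config`; newer JAX spells `jax.tree_map` as `jax.tree.map`):

```python
import jax
import jax.numpy as jnp

ema_rate = 0.999  # assumed value for illustration
params = {"w": jnp.ones((2, 2)), "b": jnp.ones(2)}
ema_params = {"w": jnp.zeros((2, 2)), "b": jnp.zeros(2)}

# Leaf-wise blend: each EMA leaf moves a small step toward the new value.
ema_params = jax.tree_map(
    lambda p_ema, p: p_ema * ema_rate + p * (1.0 - ema_rate),
    ema_params,
    params,
)
print(ema_params["w"][0, 0])  # ~0.001 after one update
```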
violet-sto/HN-GFN
main_mobo.py
[ { "identifier": "Dataset", "path": "dataset.py", "snippet": "class Dataset:\n\n def __init__(self, args, bpath, oracle, device):\n self.test_split_rng = np.random.RandomState(142857)\n self.train_rng = np.random.RandomState(int(time.time()))\n self.train_mols = []\n self.t...
from collections import defaultdict from dataset import Dataset from mol_mdp_ext import MolMDPExtended, BlockMoleculeDataExtended from oracle.oracle import Oracle from proxy import get_proxy from generator import TBGFlowNet, FMGFlowNet, MOReinforce from utils.utils import set_random_seed from utils.metrics import compute_success, compute_diversity, compute_novelty, compute_correlation, circle_points from utils.logging import get_logger from datetime import datetime from botorch.utils.multi_objective.hypervolume import Hypervolume from botorch.utils.sampling import sample_simplex from botorch.utils.transforms import normalize, unnormalize from torch.distributions.dirichlet import Dirichlet from main import RolloutWorker, get_test_mols from pymoo.util.ref_dirs import get_reference_directions from copy import deepcopy import random import os import re import argparse import json import time import threading import pdb import pickle import gzip import torch.multiprocessing as mp import torch.nn.functional as F import torch import pandas as pd import numpy as np import warnings
15,557
sampled_mols = [] sampled_raw_rewards = [] sampled_means = [] sampled_smis = [] while len(sampled_mols) < args.num_samples: rollout_worker.rollout(generator, use_rand_policy=False, weights=torch.tensor(weights).unsqueeze(0).to(args.device)) (raw_r, _, m, trajectory_stats, inflow) = rollout_worker.sampled_mols[-1] sampled_mols.append(m) sampled_raw_rewards.append(raw_r[0].item()) sampled_means.append(raw_r[1]) sampled_smis.append(m.smiles) idx_pick = np.argsort(sampled_raw_rewards)[::-1][:int(args.num_samples/len(rollout_worker.test_weights))] picked_mols.extend(np.array(sampled_mols)[idx_pick].tolist()) means.extend(np.array(sampled_means)[idx_pick].tolist()) smis.extend(np.array(sampled_smis)[idx_pick].tolist()) raw_rewards.extend(np.array(sampled_raw_rewards)[idx_pick].tolist()) raw_rewards_weight[str(weights.cpu())] = np.array(sampled_raw_rewards)[idx_pick].mean() raw_rewards_mean = np.mean(list(raw_rewards_weight.values())) assert len(picked_mols) == args.num_samples top_means = torch.tensor(means) scores_dict = oracle.batch_get_scores(picked_mols) scores = torch.tensor(pd.DataFrame.from_dict(scores_dict).values) test_loss = F.mse_loss(top_means, scores) hypervolume = Hypervolume(ref_point=torch.zeros(len(args.objectives))) volume = hypervolume.compute(top_means) volume_oracle = hypervolume.compute(scores) diversity = compute_diversity(picked_mols) batch_metrics = {'Hypervolume_reward': volume, 'Hypervolume_oracle': volume_oracle, 'Reward_mean': raw_rewards_mean, 'scores_max': pd.DataFrame.from_dict(scores_dict).max().to_dict(), 'scores_mean': pd.DataFrame.from_dict(scores_dict).mean().to_dict(), 'Test_loss': test_loss, 'Diversity': diversity} print(batch_metrics) print('Time: {}'.format(time.time()-time_start)) if not compute_multi_objective_metric: return volume, volume_oracle, raw_rewards_weight, raw_rewards_mean, test_loss, diversity else: for i in range(len(picked_mols)): picked_mols[i].score = scores_dict[i] # success/diversity/novelty is computed among the top mols. success, positive_mols = compute_success( picked_mols, scores_dict, args.objectives, score_succ) succ_diversity = compute_diversity(positive_mols) if ref_mols: novelty = compute_novelty(positive_mols, ref_mols) else: novelty = 1. 
mo_metrics = {'success': success, 'novelty': novelty, 'succ_diversity': succ_diversity, } picked_smis = [(raw_rewards[i], picked_mols[i].score, smis[i]) for i in range(len(raw_rewards))] print(mo_metrics) return (picked_mols, scores_dict, picked_smis), batch_metrics, mo_metrics def log_overall_metrics(args, dataset, batch_infos=None, MultiObjective_metrics=None): volume = dataset.compute_hypervolume() print("Hypervolume for {}: {}".format(args.logger.context, volume)) args.logger.add_scalar('Metric/hypervolume', volume, use_context=False) args.logger.add_object('scores', dataset.scores) args.logger.add_object('smis', dataset.smis) if batch_infos: args.logger.add_scalar( 'Metric/test_loss', batch_infos['Test_loss'], use_context=False) args.logger.add_object('collected_info', batch_infos) if MultiObjective_metrics: args.logger.add_scalars('Metric/MultiObjective', MultiObjective_metrics, use_context=False) def get_test_rays(): if args.n_objectives == 3: n_partitions = 6 elif args.n_objectives == 4: n_partitions = 7 test_rays = get_reference_directions("das-dennis", args.n_objectives, n_partitions=n_partitions).astype(np.float32) test_rays = test_rays[[(r > 0).all() for r in test_rays]] print(f"initialize {len(test_rays)} test rays") return test_rays def main(args): set_random_seed(args.seed) args.logger.set_context('iter_0') bpath = "./data/blocks_105.json" dpath = "./data/docked_mols.h5" # Initialize oracle and dataset (for training surrogate function) oracle = Oracle(args) dataset = Dataset(args, bpath, oracle, args.device) dataset.load_h5(dpath, num_init_examples=args.num_init_examples) log_overall_metrics(args, dataset) args.n_objectives = len(args.objectives) # Initialize surrogate function proxy = get_proxy(args, bpath, oracle) proxy.update(dataset, 0, reset=False) for i in range(1, args.num_outer_loop_iters+1): print(f"====== Starting round {i} ======") args.logger.set_context('iter_{}'.format(i)) test_weights = np.random.dirichlet(args.alpha_vector, 5*(2**(args.n_objectives-2))).astype(np.float32) if args.criterion == 'TB': generator = TBGFlowNet(args, bpath) elif args.criterion == 'FM': generator = FMGFlowNet(args, bpath) elif args.criterion == 'Reinforce':
warnings.filterwarnings('ignore') def arg_parse(): parser = argparse.ArgumentParser() parser.add_argument("--device", type=str, default='cuda') parser.add_argument('--seed', type=int, default=42, help='seed') parser.add_argument("--run", default=0, help="run", type=int) parser.add_argument('--save', action='store_true', default=False, help='Save model.') parser.add_argument('--debug',action='store_true', default=False, help='debug mode, no multi thread') parser.add_argument("--enable_tensorboard", action='store_true', default=False) parser.add_argument("--log_dir", default='runs/mobo') parser.add_argument("--include_nblocks", default=False) parser.add_argument("--num_init_examples", default=200, type=int) parser.add_argument("--num_outer_loop_iters", default=8, type=int) parser.add_argument("--num_samples", default=100, type=int) parser.add_argument("--floatX", default='float32') parser.add_argument('--sample_iterations', type=int, default=1000, help='sample mols and compute metrics') parser.add_argument("--log_weight_score", action='store_true', default=False) # objectives parser.add_argument("--objectives", type=str, default='gsk3b,jnk3,qed,sa') parser.add_argument("--acq_fn", default='UCB', type=str) parser.add_argument("--beta", default=0.1, type=float) parser.add_argument("--scalar", default='WeightedSum', type=str) parser.add_argument("--alpha", default=1., type=float, help='dirichlet distribution') parser.add_argument("--alpha_vector", default='1,1,1,1', type=str) # Proxy parser.add_argument("--proxy_normalize", action='store_true', default=False, help='normalize Y') parser.add_argument("--proxy_num_iterations", default=10000, type=int) parser.add_argument("--proxy_learning_rate", default=2.5e-4, help="Learning rate", type=float) parser.add_argument("--proxy_mbsize", default=64, help="Minibatch size", type=int) parser.add_argument("--proxy_early_stop_tol", default=10, type=int) parser.add_argument("--proxy_repr_type", default='atom_graph') parser.add_argument("--proxy_model_version", default='v2') parser.add_argument("--proxy_num_conv_steps", default=12, type=int) parser.add_argument("--proxy_nemb", default=64, help="#hidden", type=int) parser.add_argument("--proxy_weight_decay", default=1e-6, help="Weight Decay in Proxy", type=float) parser.add_argument("--proxy_uncertainty", default="evidential", type=str) # deep ensemble and GP parser.add_argument("--proxy_dropout", default=0.1, help="MC Dropout in Proxy", type=float) parser.add_argument("--proxy_num_dropout_samples", default=5, type=int) parser.add_argument("--evidential_lam", default=0.1, type=float) parser.add_argument( "--fp_radius", type=int, default=2, help="Morgan fingerprint radius." ) parser.add_argument( "--fp_nbits", type=int, default=1024, help="Morgan fingerprint nBits." 
) # GFlowNet parser.add_argument("--min_blocks", default=2, type=int) parser.add_argument("--max_blocks", default=8, type=int) parser.add_argument("--num_iterations", default=5000, type=int) parser.add_argument("--criterion", default="FM", type=str) parser.add_argument("--learning_rate", default=5e-4, help="Learning rate", type=float) parser.add_argument("--Z_learning_rate", default=5e-3, help="Learning rate", type=float) parser.add_argument("--clip_grad", default=0, type=float) parser.add_argument("--trajectories_mbsize", default=8, type=int) parser.add_argument("--offline_mbsize", default=8, type=int) parser.add_argument("--hindsight_prob", default=0.2, type=float) parser.add_argument("--hindsight_buffer_mbsize", default=8, type=int) parser.add_argument("--hindsight_trajectories_mbsize", default=8, type=int) parser.add_argument("--reward_min", default=1e-2, type=float) parser.add_argument("--reward_norm", default=1, type=float) parser.add_argument("--reward_exp", default=8, type=float) parser.add_argument("--reward_exp_ramping", default=0, type=float) parser.add_argument("--logit_clipping", default=0., type=float) # Hyperparameters for TB parser.add_argument("--partition_init", default=1, type=float) # Hyperparameters for FM parser.add_argument("--log_reg_c", default=(0.1/8)**4, type=float) parser.add_argument("--balanced_loss", default=True) parser.add_argument("--leaf_coef", default=10, type=float) # Architecture parser.add_argument("--repr_type", default='block_graph') parser.add_argument("--model_version", default='v4') parser.add_argument("--num_conv_steps", default=10, type=int) parser.add_argument("--nemb", default=256, help="#hidden", type=int) parser.add_argument("--weight_decay", default=0, type=float) parser.add_argument("--random_action_prob", default=0.05, type=float) parser.add_argument("--bootstrap_tau", default=0, type=float) parser.add_argument("--condition_type", type=str, default='HN') parser.add_argument("--ray_hidden_dim", default=100, type=int) return parser.parse_args() class BoRolloutWorker(RolloutWorker): def __init__(self, args, bpath, proxy, device): super(BoRolloutWorker, self).__init__(args, bpath, proxy, device) self.hindsight_prob = args.hindsight_prob self.hindsight_mols = defaultdict(list) self.hindsight_smiles = defaultdict(list) self.replay_threshold = 0.9 def _get(self, i, dset, weights=None): # Sample trajectories by walking backwards from the molecules in our dataset # Handle possible multithreading issues when independent threads # add/substract from dset: m = dset[i] if not isinstance(m, BlockMoleculeDataExtended): m = m[-1] r, raw_r = self._get_reward(m, weights) done = 1 samples = [] # a sample is a tuple (parents(s), parent actions, reward(s), s, done) # an action is (blockidx, stemidx) or (-1, x) for 'stop' # so we start with the stop action, unless the molecule is already # a "terminal" node (if it has no stems, no actions). 
if len(m.stems) and len(m.blocks) < self.max_blocks: samples.append(((m,), ((-1, 0),), weights, weights, r, m, done)) r = done = 0 while len(m.blocks): # and go backwards if self.ignore_parents: parents = self.mdp.parents(m) parent, action = parents[self.train_rng.randint(len(parents))] samples.append(((parent,), (action,), weights, weights, r, m, done)) r = done = 0 m = parent else: parents, actions = zip(*self.mdp.parents(m)) samples.append((parents, actions, weights.repeat(len(parents), 1), weights, r, m, done)) r = done = 0 m = parents[self.train_rng.randint(len(parents))] return samples[::-1] def _add_mol_to_replay(self, m): for i, weights in enumerate(self.test_weights): r, raw_r = self._get_reward(m, weights) if len(self.hindsight_mols[i]) < self.max_hindsight_mols or raw_r[0] > self.hindsight_mols[i][0][0]: if m.smiles not in self.hindsight_smiles[i]: self.hindsight_mols[i].append((raw_r[0].item(), m.smiles, m)) self.hindsight_smiles[i].append(m.smiles) if len(self.hindsight_mols[i]) > self.max_hindsight_mols: self.hindsight_mols[i] = sorted(self.hindsight_mols[i], key=lambda x:(x[0]))[ max(int(0.05 * self.max_hindsight_mols), 1):] self.hindsight_smiles[i] = [x[1] for x in self.hindsight_mols[i]] def _add_mol_to_online(self, r, m, inflow): if self.replay_mode == 'online': r = r + self.train_rng.normal() * 0.01 if len(self.online_mols) < self.max_online_mols or r > self.online_mols[0][0]: self.online_mols.append((r, m)) if len(self.online_mols) > self.max_online_mols: self.online_mols = sorted(self.online_mols)[ max(int(0.05 * self.max_online_mols), 1):] elif self.replay_mode == 'prioritized': self.online_mols.append((abs(inflow - np.log(r)), m)) if len(self.online_mols) > self.max_online_mols * 1.1: self.online_mols = self.online_mols[-self.max_online_mols:] def _get_reward(self, m, weights=None): rdmol = m.mol if rdmol is None: return self.reward_min # get reward from proxy raw_reward, score = self.proxy(m, weights) raw_reward = raw_reward.clip(self.reward_min) reward = self.l2r(raw_reward) return reward, (raw_reward, score) def execute_train_episode_batch(self, generator, dataset=None, Y_bounds=None, use_rand_policy=True): if self.train_rng.uniform() < self.hindsight_prob: idx = self.train_rng.randint(self.test_weights.shape[0]) weights = self.test_weights[idx].unsqueeze(0) samples = sum((self.rollout(generator, use_rand_policy, weights) for i in range(self.args.hindsight_trajectories_mbsize)), []) if self.args.hindsight_buffer_mbsize > 0: buffer = deepcopy(self.hindsight_mols[idx]) reward = np.array([x[0] for x in buffer]) prob = reward / sum(reward) eidx = np.random.choice(list(range(len(buffer))), self.args.hindsight_buffer_mbsize, replace=False, p=prob) offline_samples = sum((self._get(i, buffer, weights) for i in eidx), []) samples += offline_samples else: weights = Dirichlet(torch.tensor(self.args.alpha_vector)*self.args.alpha).sample_n(1).to(self.args.device) #* sample weights per batch, seem better samples = sum((self.rollout(generator, use_rand_policy, weights, replay=True) for i in range(self.args.trajectories_mbsize)), []) # offline sampling from dataset if self.args.offline_mbsize > 0 and dataset is not None: # use the oracle reward scores = torch.tensor(pd.DataFrame.from_dict(dataset.scores).values, dtype=torch.float32).to(args.device) if Y_bounds is not None: scores = normalize(scores, Y_bounds) reward = torch.matmul(scores, weights.reshape(-1, 1)) prob = (reward / sum(reward)).squeeze(1).cpu().numpy() eidx = np.random.choice(list(range(len(dataset.all_mols))), 
self.args.offline_mbsize, replace=False, p=prob) offline_samples = sum((self._get(i, dataset.all_mols, weights) for i in eidx), []) samples += offline_samples return zip(*samples) def initialize_hindsight_mols(self, dataset): for m in dataset.all_mols: for i, weights in enumerate(self.test_weights): r, raw_r = self._get_reward(m, weights) self.hindsight_mols[i].append((raw_r[0].item(), m.smiles, m)) for i, weights in enumerate(self.test_weights): self.hindsight_mols[i] = sorted(self.hindsight_mols[i], key=lambda x:(x[0])) self.hindsight_smiles[i] = [x[1] for x in self.hindsight_mols[i]] def train_generative_model(args, generator, bpath, proxy, oracle, dataset, test_weights, round_idx, do_save=False): print("Training generator...") os.makedirs(os.path.join(args.log_dir, f'round_{round_idx}'), exist_ok=True) device = args.device rollout_worker = BoRolloutWorker(args, bpath, proxy, device) rollout_worker.test_weights = torch.tensor(test_weights).to(device) rollout_worker.initialize_hindsight_mols(dataset) Y_bounds = torch.stack([proxy.partitioning.Y.min(dim=-2).values, proxy.partitioning.Y.max(dim=-2).values]) def save_stuff(round_idx, iter): torch.save(generator.state_dict(), os.path.join( args.log_dir, 'round_{}/{}_generator_checkpoint.pth'.format(round_idx, iter))) pickle.dump(rollout_worker.sampled_mols, gzip.open(f'{args.log_dir}/sampled_mols.pkl.gz', 'wb')) multi_thread = not args.debug if multi_thread: sampler = rollout_worker.start_samplers(generator, 8, dataset) def stop_everything(): print('joining') rollout_worker.stop_samplers_and_join() last_losses = [] train_losses = [] test_losses = [] test_infos = [] train_infos = [] time_last_check = time.time() for i in range(args.num_iterations + 1): if multi_thread: r = sampler() for thread in rollout_worker.sampler_threads: if thread.failed: stop_everything() pdb.post_mortem(thread.exception.__traceback__) return p, pb, a, pw, w, r, s, d, mols = r else: p, pb, a, pw, w, r, s, d, mols = rollout_worker.sample2batch( rollout_worker.execute_train_episode_batch(generator, dataset, Y_bounds, use_rand_policy=True)) loss = generator.train_step(p, pb, a, pw, w, r, s, d, mols, i) last_losses.append(loss) if not i % 100: train_loss = [np.round(np.mean(i), 3) for i in zip(*last_losses)] train_losses.append(train_loss) args.logger.add_scalar( 'Loss/round{}/train'.format(round_idx), train_loss[0], use_context=False) print('Iter {}: Loss {}, Time {}'.format( i, train_loss, round(time.time() - time_last_check, 3))) time_last_check = time.time() last_losses = [] if not i % args.sample_iterations and i != 0: volume, volume_oracle, reward_weight, reward_mean, test_loss, diversity = sample_batch( args, generator, rollout_worker, oracle, proxy, Y_bounds, compute_multi_objective_metric=False) args.logger.add_scalar( 'round{}/Top-100-sampled/volumes'.format(round_idx), volume, use_context=False) args.logger.add_scalar( 'round{}/Top-100-sampled/volumes_oracle'.format(round_idx), volume_oracle, use_context=False) args.logger.add_scalars( 'round{}/Top-100-sampled/reward_weight'.format(round_idx), reward_weight, use_context=False) args.logger.add_scalar( 'round{}/Top-100-sampled/reward_mean'.format(round_idx), reward_mean, use_context=False) # reward_mean is a dict, the keys are test_weights args.logger.add_scalar( 'round{}/Top-100-sampled/test_loss'.format(round_idx), test_loss, use_context=False) args.logger.add_scalar( 'round{}/Top-100-sampled/dists'.format(round_idx), diversity, use_context=False) if do_save: save_stuff(round_idx, i) stop_everything() if 
do_save: save_stuff(round_idx, i) checkpoint_path = os.path.join(args.log_dir, f'round_{round_idx}/{i}_generator_checkpoint.pth') generator.load_state_dict(torch.load(checkpoint_path)) return rollout_worker, {'train_losses': train_losses, 'test_losses': test_losses, 'test_infos': test_infos, 'train_infos': train_infos} def sample_batch(args, generator, rollout_worker, oracle=None, proxy=None, ref_mols=None, Y_bounds=None, compute_multi_objective_metric=False): score_succ = {'gsk3b': 0.5, 'jnk3': 0.5, 'drd2': 0.5, 'chemprop_sars': 0.5, 'chemprop_hiv': 0.5, "seh": 0.5, 'qed': 0.6, 'sa': 0.67} if Y_bounds is None: Y_bounds = torch.stack([proxy.partitioning.Y.min( dim=-2).values, proxy.partitioning.Y.max(dim=-2).values]) time_start = time.time() print(f"Sampling molecules...") raw_rewards = [] raw_rewards_weight = {} means = [] picked_mols = [] smis = [] for i, weights in enumerate(rollout_worker.test_weights): sampled_mols = [] sampled_raw_rewards = [] sampled_means = [] sampled_smis = [] while len(sampled_mols) < args.num_samples: rollout_worker.rollout(generator, use_rand_policy=False, weights=torch.tensor(weights).unsqueeze(0).to(args.device)) (raw_r, _, m, trajectory_stats, inflow) = rollout_worker.sampled_mols[-1] sampled_mols.append(m) sampled_raw_rewards.append(raw_r[0].item()) sampled_means.append(raw_r[1]) sampled_smis.append(m.smiles) idx_pick = np.argsort(sampled_raw_rewards)[::-1][:int(args.num_samples/len(rollout_worker.test_weights))] picked_mols.extend(np.array(sampled_mols)[idx_pick].tolist()) means.extend(np.array(sampled_means)[idx_pick].tolist()) smis.extend(np.array(sampled_smis)[idx_pick].tolist()) raw_rewards.extend(np.array(sampled_raw_rewards)[idx_pick].tolist()) raw_rewards_weight[str(weights.cpu())] = np.array(sampled_raw_rewards)[idx_pick].mean() raw_rewards_mean = np.mean(list(raw_rewards_weight.values())) assert len(picked_mols) == args.num_samples top_means = torch.tensor(means) scores_dict = oracle.batch_get_scores(picked_mols) scores = torch.tensor(pd.DataFrame.from_dict(scores_dict).values) test_loss = F.mse_loss(top_means, scores) hypervolume = Hypervolume(ref_point=torch.zeros(len(args.objectives))) volume = hypervolume.compute(top_means) volume_oracle = hypervolume.compute(scores) diversity = compute_diversity(picked_mols) batch_metrics = {'Hypervolume_reward': volume, 'Hypervolume_oracle': volume_oracle, 'Reward_mean': raw_rewards_mean, 'scores_max': pd.DataFrame.from_dict(scores_dict).max().to_dict(), 'scores_mean': pd.DataFrame.from_dict(scores_dict).mean().to_dict(), 'Test_loss': test_loss, 'Diversity': diversity} print(batch_metrics) print('Time: {}'.format(time.time()-time_start)) if not compute_multi_objective_metric: return volume, volume_oracle, raw_rewards_weight, raw_rewards_mean, test_loss, diversity else: for i in range(len(picked_mols)): picked_mols[i].score = scores_dict[i] # success/diversity/novelty is computed among the top mols. success, positive_mols = compute_success( picked_mols, scores_dict, args.objectives, score_succ) succ_diversity = compute_diversity(positive_mols) if ref_mols: novelty = compute_novelty(positive_mols, ref_mols) else: novelty = 1. 
mo_metrics = {'success': success, 'novelty': novelty, 'succ_diversity': succ_diversity, } picked_smis = [(raw_rewards[i], picked_mols[i].score, smis[i]) for i in range(len(raw_rewards))] print(mo_metrics) return (picked_mols, scores_dict, picked_smis), batch_metrics, mo_metrics def log_overall_metrics(args, dataset, batch_infos=None, MultiObjective_metrics=None): volume = dataset.compute_hypervolume() print("Hypervolume for {}: {}".format(args.logger.context, volume)) args.logger.add_scalar('Metric/hypervolume', volume, use_context=False) args.logger.add_object('scores', dataset.scores) args.logger.add_object('smis', dataset.smis) if batch_infos: args.logger.add_scalar( 'Metric/test_loss', batch_infos['Test_loss'], use_context=False) args.logger.add_object('collected_info', batch_infos) if MultiObjective_metrics: args.logger.add_scalars('Metric/MultiObjective', MultiObjective_metrics, use_context=False) def get_test_rays(): if args.n_objectives == 3: n_partitions = 6 elif args.n_objectives == 4: n_partitions = 7 test_rays = get_reference_directions("das-dennis", args.n_objectives, n_partitions=n_partitions).astype(np.float32) test_rays = test_rays[[(r > 0).all() for r in test_rays]] print(f"initialize {len(test_rays)} test rays") return test_rays def main(args): set_random_seed(args.seed) args.logger.set_context('iter_0') bpath = "./data/blocks_105.json" dpath = "./data/docked_mols.h5" # Initialize oracle and dataset (for training surrogate function) oracle = Oracle(args) dataset = Dataset(args, bpath, oracle, args.device) dataset.load_h5(dpath, num_init_examples=args.num_init_examples) log_overall_metrics(args, dataset) args.n_objectives = len(args.objectives) # Initialize surrogate function proxy = get_proxy(args, bpath, oracle) proxy.update(dataset, 0, reset=False) for i in range(1, args.num_outer_loop_iters+1): print(f"====== Starting round {i} ======") args.logger.set_context('iter_{}'.format(i)) test_weights = np.random.dirichlet(args.alpha_vector, 5*(2**(args.n_objectives-2))).astype(np.float32) if args.criterion == 'TB': generator = TBGFlowNet(args, bpath) elif args.criterion == 'FM': generator = FMGFlowNet(args, bpath) elif args.criterion == 'Reinforce':
generator = MOReinforce(args, bpath)
7
2023-10-24 14:10:35+00:00
24k
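The MOGFN record above draws preference weights from a Dirichlet and ranks sampled molecules separately under each weight vector before pooling the top picks and computing hypervolume. A minimal sketch of that scalarize-then-rank step, using toy reward vectors in place of the oracle scores; all names and sizes here are illustrative, not taken from the repo:

import numpy as np

rng = np.random.default_rng(0)

# Toy stand-ins: 8 candidate molecules scored on 3 objectives in [0, 1].
rewards = rng.random((8, 3))

# Dirichlet-distributed preference weights, mirroring
# np.random.dirichlet(args.alpha_vector, n) in the record.
alpha = np.ones(3)
test_weights = rng.dirichlet(alpha, size=4).astype(np.float32)

num_samples = 4
per_weight = num_samples // len(test_weights)  # even split across weight vectors
picked = []
for w in test_weights:
    scores = rewards @ w                              # weighted-sum scalarization
    idx_pick = np.argsort(scores)[::-1][:per_weight]  # argsort is ascending; reverse for best-first
    picked.extend(idx_pick.tolist())
print(picked)  # indices of the per-weight winners, as in the record's idx_pick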
caglarkucuk/earthformer-satellite-to-radar
ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer_unet_dec.py
[ { "identifier": "Upsample3DLayer", "path": "ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer.py", "snippet": "class Upsample3DLayer(nn.Module):\n \"\"\"Upsampling based on nn.UpSampling and Conv3x3.\n\n If the temporal dimension remains the same:\n x --> interpolation-2d (neares...
from typing import Sequence, Union from functools import lru_cache from collections import OrderedDict from torch import nn from einops import rearrange from .cuboid_transformer import ( Upsample3DLayer, PatchMerging3D, PosEmbed, InitialEncoder, FinalDecoder, InitialStackPatchMergingEncoder, FinalStackUpsamplingDecoder, StackCuboidSelfAttentionBlock, StackCuboidCrossAttentionBlock, CuboidTransformerEncoder) from .cuboid_transformer_patterns import CuboidSelfAttentionPatterns, CuboidCrossAttentionPatterns from .utils import ( get_activation, get_norm_layer, _generalize_padding, _generalize_unpadding, apply_initialization, round_to) import warnings import torch import torch.nn.functional as F import torch.utils.checkpoint as checkpoint
15,903
cross_last_n_frames=None, num_heads=4, attn_drop=0.0, proj_drop=0.0, ffn_drop=0.0, ffn_activation='leaky', gated_ffn=False, norm_layer='layer_norm', use_inter_ffn=False, hierarchical_pos_embed=False, pos_embed_type='t+hw', max_temporal_relative=50, padding_type='ignore', checkpoint_level=True, use_relative_pos=True, self_attn_use_final_proj=True, # global vectors use_self_global=False, self_update_global=True, use_cross_global=False, use_global_vector_ffn=True, use_global_self_attn=False, separate_global_qkv=False, global_dim_ratio=1, # initialization attn_linear_init_mode="0", ffn_linear_init_mode="0", conv_init_mode="0", up_linear_init_mode="0", norm_init_mode="0", # different from `CuboidTransformerDecoder`, no arg `use_first_self_attn=False` downsample=2, downsample_type='patch_merge', cross_mode="up", down_linear_init_mode="0", ): """ Parameters ---------- target_temporal_length mem_shapes cross_start The block to start cross attention depth Depth of each block downsample The downsample ratio downsample_type Type of the downsampling layer upsample_type The type of the upsampling layers upsample_kernel_size block_self_attn_patterns Pattern of the block self attentions block_self_cuboid_size block_self_cuboid_strategy block_self_shift_size block_cross_attn_patterns block_cross_cuboid_hw block_cross_cuboid_strategy block_cross_shift_hw block_cross_n_temporal cross_last_n_frames cross_mode Must be one of ("up", "down", "both") Control whether the upsampling/downsampling/both phases cross attend to the encoded latent features num_heads attn_drop proj_drop ffn_drop ffn_activation gated_ffn Whether to enable gated ffn or not norm_layer The normalization layer use_inter_ffn Whether to use intermediate FFN hierarchical_pos_embed Whether to add pos embedding for each hierarchy. max_temporal_relative padding_type checkpoint_level """ super(CuboidTransformerUNetDecoder, self).__init__() # initialization mode self.attn_linear_init_mode = attn_linear_init_mode self.ffn_linear_init_mode = ffn_linear_init_mode self.conv_init_mode = conv_init_mode self.up_linear_init_mode = up_linear_init_mode self.norm_init_mode = norm_init_mode assert len(depth) == len(mem_shapes) self.target_temporal_length = target_temporal_length self.num_blocks = len(mem_shapes) self.cross_start = cross_start self.mem_shapes = mem_shapes self.block_units = tuple(mem_shape[-1] for mem_shape in self.mem_shapes) self.depth = depth if not isinstance(downsample, (tuple, list)): downsample = (1, downsample, downsample) self.downsample = downsample self.downsample_type = downsample_type self.upsample_type = upsample_type self.hierarchical_pos_embed = hierarchical_pos_embed self.checkpoint_level = checkpoint_level self.use_self_global = use_self_global self.self_update_global = self_update_global self.use_cross_global = use_cross_global self.use_global_vector_ffn = use_global_vector_ffn assert cross_mode in ["up", "down", "both"], f"Invalid cross_mode {cross_mode}!" self.cross_mode = cross_mode self.up_use_cross = self.cross_mode in ["up", "both"] self.down_use_cross = self.cross_mode in ["down", "both"] if self.num_blocks > 1: # Construct downsampling layers if downsample_type == 'patch_merge': self.downsample_layers = nn.ModuleList(
"""CuboidTransformer adapted for auxiliary inputs in decoder""" class CuboidTransformerUNetDecoder(nn.Module): """U-Net style Decoder of the CuboidTransformer. For each block, we first apply the StackCuboidSelfAttention and then apply the StackCuboidCrossAttention We add cross attention following 3 modes: cross_mode == "down": x --> attn --> cross_attn --> downscale --> ... --> z --> attn --> upscale --> ... --> out ^ ^ | | | | mem mem cross_mode == "up": x --> attn --> downscale --> ... --> z --> attn --> cross_attn --> upscale --> ... --> out ^ ^ | | | | mem mem cross_mode == "both": x --> attn --> cross_attn --> downscale --> ... --> z --> attn --> cross_attn --> upscale --> ... --> out ^ ^ ^ ^ | | | | | | | | mem mem mem mem """ def __init__(self, target_temporal_length, mem_shapes, cross_start=0, depth=[2, 2], upsample_type="upsample", upsample_kernel_size=3, block_self_attn_patterns=None, block_self_cuboid_size=[(4, 4, 4), (4, 4, 4)], block_self_cuboid_strategy=[('l', 'l', 'l'), ('d', 'd', 'd')], block_self_shift_size=[(1, 1, 1), (0, 0, 0)], block_cross_attn_patterns=None, block_cross_cuboid_hw=[(4, 4), (4, 4)], block_cross_cuboid_strategy=[('l', 'l', 'l'), ('d', 'l', 'l')], block_cross_shift_hw=[(0, 0), (0, 0)], block_cross_n_temporal=[1, 2], cross_last_n_frames=None, num_heads=4, attn_drop=0.0, proj_drop=0.0, ffn_drop=0.0, ffn_activation='leaky', gated_ffn=False, norm_layer='layer_norm', use_inter_ffn=False, hierarchical_pos_embed=False, pos_embed_type='t+hw', max_temporal_relative=50, padding_type='ignore', checkpoint_level=True, use_relative_pos=True, self_attn_use_final_proj=True, # global vectors use_self_global=False, self_update_global=True, use_cross_global=False, use_global_vector_ffn=True, use_global_self_attn=False, separate_global_qkv=False, global_dim_ratio=1, # initialization attn_linear_init_mode="0", ffn_linear_init_mode="0", conv_init_mode="0", up_linear_init_mode="0", norm_init_mode="0", # different from `CuboidTransformerDecoder`, no arg `use_first_self_attn=False` downsample=2, downsample_type='patch_merge', cross_mode="up", down_linear_init_mode="0", ): """ Parameters ---------- target_temporal_length mem_shapes cross_start The block to start cross attention depth Depth of each block downsample The downsample ratio downsample_type Type of the downsampling layer upsample_type The type of the upsampling layers upsample_kernel_size block_self_attn_patterns Pattern of the block self attentions block_self_cuboid_size block_self_cuboid_strategy block_self_shift_size block_cross_attn_patterns block_cross_cuboid_hw block_cross_cuboid_strategy block_cross_shift_hw block_cross_n_temporal cross_last_n_frames cross_mode Must be one of ("up", "down", "both") Control whether the upsampling/downsampling/both phases cross attend to the encoded latent features num_heads attn_drop proj_drop ffn_drop ffn_activation gated_ffn Whether to enable gated ffn or not norm_layer The normalization layer use_inter_ffn Whether to use intermediate FFN hierarchical_pos_embed Whether to add pos embedding for each hierarchy. 
max_temporal_relative padding_type checkpoint_level """ super(CuboidTransformerUNetDecoder, self).__init__() # initialization mode self.attn_linear_init_mode = attn_linear_init_mode self.ffn_linear_init_mode = ffn_linear_init_mode self.conv_init_mode = conv_init_mode self.up_linear_init_mode = up_linear_init_mode self.norm_init_mode = norm_init_mode assert len(depth) == len(mem_shapes) self.target_temporal_length = target_temporal_length self.num_blocks = len(mem_shapes) self.cross_start = cross_start self.mem_shapes = mem_shapes self.block_units = tuple(mem_shape[-1] for mem_shape in self.mem_shapes) self.depth = depth if not isinstance(downsample, (tuple, list)): downsample = (1, downsample, downsample) self.downsample = downsample self.downsample_type = downsample_type self.upsample_type = upsample_type self.hierarchical_pos_embed = hierarchical_pos_embed self.checkpoint_level = checkpoint_level self.use_self_global = use_self_global self.self_update_global = self_update_global self.use_cross_global = use_cross_global self.use_global_vector_ffn = use_global_vector_ffn assert cross_mode in ["up", "down", "both"], f"Invalid cross_mode {cross_mode}!" self.cross_mode = cross_mode self.up_use_cross = self.cross_mode in ["up", "both"] self.down_use_cross = self.cross_mode in ["down", "both"] if self.num_blocks > 1: # Construct downsampling layers if downsample_type == 'patch_merge': self.downsample_layers = nn.ModuleList(
[PatchMerging3D(dim=self.block_units[i],
1
2023-10-23 11:45:50+00:00
24k
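The `Upsample3DLayer` referenced in this record's context is described as nearest-neighbor 2-D interpolation plus a 3x3 conv that leaves the temporal axis untouched. A rough, self-contained reconstruction under that reading — a sketch, not the repo's actual implementation:

import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange

class Upsample2DPerFrame(nn.Module):
    """Nearest-neighbor 2x spatial upsampling + 3x3 conv, applied frame by frame."""
    def __init__(self, dim: int, out_dim: int, scale: int = 2):
        super().__init__()
        self.scale = scale
        self.conv = nn.Conv2d(dim, out_dim, kernel_size=3, padding=1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: (B, T, H, W, C) -- fold time into batch so 2-D ops leave T unchanged.
        b, t = x.shape[:2]
        x = rearrange(x, "b t h w c -> (b t) c h w")
        x = F.interpolate(x, scale_factor=self.scale, mode="nearest")
        x = self.conv(x)
        return rearrange(x, "(b t) c h w -> b t h w c", b=b, t=t)

up = Upsample2DPerFrame(dim=32, out_dim=16)
y = up(torch.randn(2, 4, 8, 8, 32))
print(y.shape)  # torch.Size([2, 4, 16, 16, 16]): T preserved, H/W doubled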
IBM/VillanDiffusion
make_latent_dataset.py
[ { "identifier": "DiffuserModelSched", "path": "model.py", "snippet": "class DiffuserModelSched():\n LR_SCHED_CKPT: str = \"lr_sched.pth\"\n OPTIM_CKPT: str = \"optim.pth\"\n \n SDE_VP: str = \"SDE-VP\"\n SDE_VE: str = \"SDE-VE\"\n SDE_LDM: str = \"SDE-LDM\"\n CLIP_SAMPLE_DEFAULT = F...
import glob import os import pathlib import torch from dataclasses import dataclass from typing import List, Union from joblib import Parallel, delayed from PIL import Image from tqdm import tqdm from datasets import Dataset from torchvision import transforms from torchvision.transforms import Compose from torch.utils.data import DataLoader, ConcatDataset, Subset, IterableDataset from model import DiffuserModelSched from dataset import DatasetLoader, Backdoor
21,517
@dataclass class TrainingConfig: latent_dataset_dir: str = 'celeba_hq_256_latents' dataset_name: str = DatasetLoader.CELEBA_HQ
@dataclass class TrainingConfig: latent_dataset_dir: str = 'celeba_hq_256_latents' dataset_name: str = DatasetLoader.CELEBA_HQ
trigger: str = Backdoor.TRIGGER_SM_STOP_SIGN
2
2023-10-17 19:57:37+00:00
24k
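`make_latent_dataset.py` evidently pre-encodes CelebA-HQ images into latents before training. A hedged sketch of that precomputation idea with diffusers' `AutoencoderKL`; the checkpoint id and the 0.18215 scaling factor are Stable Diffusion conventions assumed for illustration, not taken from this repo:

import torch
from diffusers.models import AutoencoderKL

device = "cuda" if torch.cuda.is_available() else "cpu"
# Illustrative checkpoint; the repo may load a different VAE via DiffuserModelSched.
vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse").to(device).eval()

@torch.no_grad()
def encode_batch(pixels: torch.Tensor) -> torch.Tensor:
    # pixels: (B, 3, 256, 256) in [-1, 1]; returns (B, 4, 32, 32) latents.
    posterior = vae.encode(pixels.to(device)).latent_dist
    return posterior.sample() * 0.18215  # SD's latent scaling, assumed here

latents = encode_batch(torch.rand(2, 3, 256, 256) * 2 - 1)
torch.save(latents.cpu(), "latents_batch0.pt")  # cache to disk for reuse in training
print(latents.shape)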
nchen909/Pass-Tuning
evaluator/CodeBLEU/syntax_match.py
[ { "identifier": "DFG_python", "path": "evaluator/CodeBLEU/parser/DFG.py", "snippet": "def DFG_python(root_node,index_to_code,states):\n assignment=['assignment','augmented_assignment','for_in_clause']\n if_statement=['if_statement']\n for_statement=['for_statement']\n while_statement=['while...
from evaluator.CodeBLEU.parser import DFG_python, DFG_java, DFG_ruby, DFG_go, DFG_php, DFG_javascript, DFG_csharp,DFG_c from evaluator.CodeBLEU.parser import (remove_comments_and_docstrings, tree_to_token_index, index_to_code_token, tree_to_variable_index) from tree_sitter import Language, Parser
17,407
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. parser_path = '/data/pretrain-attention/CodePrompt/evaluator/CodeBLEU/parser' dfg_function = { 'python': DFG_python, 'java': DFG_java, 'ruby': DFG_ruby, 'go': DFG_go,
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. parser_path = '/data/pretrain-attention/CodePrompt/evaluator/CodeBLEU/parser' dfg_function = { 'python': DFG_python, 'java': DFG_java, 'ruby': DFG_ruby, 'go': DFG_go,
'php': DFG_php,
4
2023-10-20 09:24:44+00:00
24k
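The CodeBLEU record wires tree-sitter parsers to per-language DFG extractors. Below is a simplified syntax-match sketch using the classic py-tree-sitter (<0.22) API that the record's imports imply; it assumes a grammar shared object has already been built (e.g. via Language.build_library) and counts matched subtree shapes more crudely than CodeBLEU's real implementation:

from tree_sitter import Language, Parser

# Assumes something like:
# Language.build_library("build/langs.so", ["vendor/tree-sitter-python"])
PY_LANGUAGE = Language("build/langs.so", "python")
parser = Parser()
parser.set_language(PY_LANGUAGE)

def all_subtrees(node):
    # Yield every subtree as (node type, child types): a crude structural signature.
    stack = [node]
    while stack:
        n = stack.pop()
        yield (n.type, tuple(c.type for c in n.children))
        stack.extend(n.children)

def syntax_match(reference: str, candidate: str) -> float:
    ref = parser.parse(bytes(reference, "utf8")).root_node
    cand = parser.parse(bytes(candidate, "utf8")).root_node
    ref_set = set(all_subtrees(ref))
    cand_trees = list(all_subtrees(cand))
    hits = sum(t in ref_set for t in cand_trees)
    return hits / max(len(cand_trees), 1)

print(syntax_match("x = 1 + 2\n", "x = 2 + 1\n"))  # 1.0: identical tree shapes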
JoaoPedro9674/django-ledger
django_ledger/io/io_mixin.py
[ { "identifier": "settings", "path": "django_ledger/settings.py", "snippet": " DJANGO_LEDGER_GRAPHQL_SUPPORT_ENABLED = True\n DJANGO_LEDGER_GRAPHQL_SUPPORT_ENABLED = False\n DJANGO_LEDGER_PDF_SUPPORT_ENABLED = True\n DJANGO_LEDGER_PDF_SUPPORT_ENABLED = False\nDJANGO_LEDGER_USE_CLOSING_ENTRIES...
from collections import defaultdict, namedtuple from datetime import datetime, date from itertools import groupby from pathlib import Path from random import choice from typing import List, Set, Union, Tuple, Optional, Dict from django.contrib.auth import get_user_model from django.core.exceptions import ValidationError, ObjectDoesNotExist from django.db.models import Sum, QuerySet from django.db.models.functions import TruncMonth from django.http import Http404 from django.utils.dateparse import parse_date, parse_datetime from django.utils.timezone import make_aware, is_naive, localtime from django.utils.translation import gettext_lazy as _ from django_ledger import settings from django_ledger.exceptions import InvalidDateInputError, TransactionNotInBalanceError from django_ledger.io import roles as roles_module from django_ledger.io.io_context import (RoleContextManager, GroupContextManager, ActivityContextManager, BalanceSheetStatementContextManager, IncomeStatementContextManager, CashFlowStatementContextManager) from django_ledger.io.io_digest import IODigestContextManager from django_ledger.io.ratios import FinancialRatioManager from django_ledger.models.utils import lazy_loader
15,760
a['account__uuid'], a.get('journal_entry__entity_unit__uuid') if by_unit else None, a.get('dt_idx').year if by_period else None, a.get('dt_idx').month if by_period else None, a.get('journal_entry__activity') if by_activity else None, a.get('tx_type') if by_tx_type else None, )) gb_digest = [self.aggregate_balances(k, g) for k, g in accounts_gb_code] for acc in gb_digest: acc['balance_abs'] = abs(acc['balance']) if signs: TransactionModel = lazy_loader.get_txs_model() for acc in gb_digest: if any([ all([acc['role_bs'] == roles_module.BS_ASSET_ROLE, acc['balance_type'] == TransactionModel.CREDIT]), all([acc['role_bs'] in ( roles_module.BS_LIABILITIES_ROLE, roles_module.BS_EQUITY_ROLE ), acc['balance_type'] == TransactionModel.DEBIT]) ]): acc['balance'] = -acc['balance'] return txs_queryset, gb_digest @staticmethod def aggregate_balances(k, g): gl = list(g) return { 'account_uuid': k[0], 'unit_uuid': k[1], 'unit_name': gl[0].get('journal_entry__entity_unit__name'), 'activity': gl[0].get('journal_entry__activity'), 'period_year': k[2], 'period_month': k[3], 'role_bs': roles_module.BS_ROLES.get(gl[0]['account__role']), 'role': gl[0]['account__role'], 'code': gl[0]['account__code'], 'name': gl[0]['account__name'], 'balance_type': gl[0]['account__balance_type'], 'tx_type': k[5], 'balance': sum(a['balance'] for a in gl), } def digest(self, entity_slug: str = None, unit_slug: str = None, user_model: UserModel = None, txs_queryset: QuerySet = None, as_io_digest: bool = False, accounts: Optional[Union[Set[str], List[str]]] = None, role: Optional[Union[Set[str], List[str]]] = None, activity: str = None, signs: bool = True, to_date: Union[str, datetime, date] = None, from_date: Union[str, datetime, date] = None, process_roles: bool = False, process_groups: bool = False, process_ratios: bool = False, process_activity: bool = False, equity_only: bool = False, by_period: bool = False, by_unit: bool = False, by_activity: bool = False, by_tx_type: bool = False, digest_name: str = None, balance_sheet_statement: bool = False, income_statement: bool = False, cash_flow_statement: bool = False, **kwargs) -> Union[Tuple, IODigestContextManager]: if balance_sheet_statement: from_date = None if cash_flow_statement: by_activity = True if activity: activity = validate_activity(activity) if role: role = roles_module.validate_roles(role) from_date, to_date = validate_dates(from_date, to_date) io_data = defaultdict(lambda: dict()) io_data['io_model'] = self io_data['from_date'] = from_date io_data['to_date'] = to_date io_data['by_unit'] = by_unit io_data['by_period'] = by_period io_data['by_activity'] = by_activity io_data['by_tx_type'] = by_tx_type txs_qs, accounts_digest = self.python_digest( txs_queryset=txs_queryset, user_model=user_model, accounts=accounts, role=role, activity=activity, entity_slug=entity_slug, unit_slug=unit_slug, to_date=to_date, from_date=from_date, signs=signs, equity_only=equity_only, by_period=by_period, by_unit=by_unit, by_activity=by_activity, by_tx_type=by_tx_type, **kwargs ) io_data['txs_qs'] = txs_qs io_data['accounts'] = accounts_digest if process_roles:
""" Django Ledger created by Miguel Sanda <msanda@arrobalytics.com>. Copyright© EDMA Group Inc licensed under the GPLv3 Agreement. Contributions to this module: * Miguel Sanda <msanda@arrobalytics.com> """ UserModel = get_user_model() def diff_tx_data(tx_data: list, raise_exception: bool = True): IS_TX_MODEL = False TransactionModel = lazy_loader.get_txs_model() if isinstance(tx_data[0], TransactionModel): CREDITS = sum(tx.amount for tx in tx_data if tx.tx_type == 'credit') DEBITS = sum(tx.amount for tx in tx_data if tx.tx_type == 'debit') IS_TX_MODEL = True elif isinstance(tx_data[0], dict): CREDITS = sum(tx['amount'] for tx in tx_data if tx['tx_type'] == 'credit') DEBITS = sum(tx['amount'] for tx in tx_data if tx['tx_type'] == 'debit') else: raise ValidationError('Only Dictionary or TransactionModel allowed.') is_valid = (CREDITS == DEBITS) diff = CREDITS - DEBITS if not is_valid and abs(diff) > settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE: if raise_exception: raise TransactionNotInBalanceError( f'Invalid tx data. Credits and debits must match. Currently cr: {CREDITS}, db {DEBITS}.' f'Max Tolerance {settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE}' ) return IS_TX_MODEL, is_valid, diff def check_tx_balance(tx_data: list, perform_correction: bool = False) -> bool: if tx_data: IS_TX_MODEL, is_valid, diff = diff_tx_data(tx_data, raise_exception=perform_correction) if not perform_correction and abs(diff): return False if not perform_correction and abs(diff) > settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE: return False while not is_valid: tx_type_choice = choice(['debit', 'credit']) txs_candidates = list(tx for tx in tx_data if tx['tx_type'] == tx_type_choice) if len(txs_candidates) > 0: tx = choice(list(tx for tx in tx_data if tx['tx_type'] == tx_type_choice)) if any([diff > 0 and tx_type_choice == 'debit', diff < 0 and tx_type_choice == 'credit']): if IS_TX_MODEL: tx.amount += settings.DJANGO_LEDGER_TRANSACTION_CORRECTION else: tx['amount'] += settings.DJANGO_LEDGER_TRANSACTION_CORRECTION elif any([diff < 0 and tx_type_choice == 'debit', diff > 0 and tx_type_choice == 'credit']): if IS_TX_MODEL: tx.amount -= settings.DJANGO_LEDGER_TRANSACTION_CORRECTION else: tx['amount'] += settings.DJANGO_LEDGER_TRANSACTION_CORRECTION IS_TX_MODEL, is_valid, diff = diff_tx_data(tx_data) return True def validate_io_date(dt: Union[str, date, datetime], no_parse_localdate: bool = True) -> Optional[datetime]: if not dt: return if isinstance(dt, date): dt = make_aware( value=datetime.combine( dt, datetime.min.time() )) return dt elif isinstance(dt, datetime): if is_naive(dt): return make_aware(dt) return dt elif isinstance(dt, str): # try to parse a date object from string... fdt = parse_date(dt) if not fdt: # try to parse a datetime object from string... fdt = parse_datetime(dt) if not fdt: raise InvalidDateInputError( message=f'Could not parse date from {dt}' ) elif is_naive(fdt): fdt = make_aware(fdt) return fdt if no_parse_localdate: return localtime() def validate_dates( from_date: Union[str, datetime, date] = None, to_date: Union[str, datetime, date] = None) -> Tuple[date, date]: from_date = validate_io_date(from_date, no_parse_localdate=False) to_date = validate_io_date(to_date) return from_date, to_date def validate_activity(activity: str, raise_404: bool = False): # idea: move to model???... JournalEntryModel = lazy_loader.get_journal_entry_model() valid = activity in JournalEntryModel.VALID_ACTIVITIES if activity and not valid: exception = ValidationError(f'{activity} is invalid. 
Choices are {JournalEntryModel.VALID_ACTIVITIES}.') if raise_404: raise Http404(exception) raise exception return activity class IOValidationError(ValidationError): pass class IODatabaseMixIn: """ Controls how transactions are recorded into the ledger. """ def is_entity_model(self): return isinstance(self, lazy_loader.get_entity_model()) def is_ledger_model(self): return isinstance(self, lazy_loader.get_ledger_model()) def is_entity_unit_model(self): return isinstance(self, lazy_loader.get_unit_model()) def get_entity_model_from_io(self): if self.is_entity_model(): return self elif self.is_ledger_model(): return self.entity elif self.is_entity_unit_model(): return self.entity # def is_time_bounded(self, from_date, to_date): def database_digest(self, txs_queryset: QuerySet, entity_slug: str = None, unit_slug: str = None, user_model: UserModel = None, from_date: date = None, to_date: date = None, activity: str = None, role: str = None, accounts: str or List[str] or Set[str] = None, posted: bool = True, exclude_zero_bal: bool = True, by_activity: bool = False, by_tx_type: bool = False, by_period: bool = False, by_unit: bool = False, **kwargs): if settings.DJANGO_LEDGER_USE_CLOSING_ENTRIES: if not from_date: entity_model = self.get_entity_model_from_io() closing_entry_date = entity_model.select_closing_entry_for_io_date(to_date=to_date) # print(closing_entry_date) # # if closing_entry_date: # closing_entry_list = entity_model.get_closing_entry_cache_for_date( # closing_date=closing_entry_date, # force_cache_update=True # ) # from_date_d = closing_entry_date + timedelta(days=1) # print('Orig From:', from_date) # print('New from:', from_date_d) # print('To Date:', to_date) # print(closing_entry_list) if not txs_queryset: TransactionModel = lazy_loader.get_txs_model() if self.is_entity_model(): if entity_slug: if entity_slug != self.slug: raise IOValidationError('Inconsistent entity_slug. 
' f'Provided {entity_slug} does not match actual {self.slug}') if unit_slug: txs_queryset = TransactionModel.objects.for_unit( user_model=user_model, entity_slug=entity_slug or self.slug, unit_slug=unit_slug ) else: txs_queryset = TransactionModel.objects.for_entity( user_model=user_model, entity_slug=self ) elif self.is_ledger_model(): if not entity_slug: raise IOValidationError( 'Calling digest from Ledger Model requires entity_slug explicitly for safety') txs_queryset = TransactionModel.objects.for_ledger( user_model=user_model, entity_slug=entity_slug, ledger_model=self ) elif self.is_entity_unit_model(): if not entity_slug: raise IOValidationError( 'Calling digest from Entity Unit requires entity_slug explicitly for safety') txs_queryset = TransactionModel.objects.for_unit( user_model=user_model, entity_slug=entity_slug, unit_slug=unit_slug or self ) else: txs_queryset = TransactionModel.objects.none() txs_queryset = txs_queryset.not_closing_entry() if exclude_zero_bal: txs_queryset = txs_queryset.filter(amount__gt=0) if posted: txs_queryset = txs_queryset.posted() if from_date: txs_queryset = txs_queryset.from_date(from_date=from_date) if to_date: txs_queryset = txs_queryset.to_date(to_date=to_date) if accounts: if not isinstance(accounts, str): accounts = [accounts] txs_queryset = txs_queryset.for_accounts(account_list=accounts) if activity: if isinstance(activity, str): activity = [activity] txs_queryset = txs_queryset.for_activity(activity_list=activity) if role: txs_queryset = txs_queryset.for_roles(role_list=role) VALUES = [ 'account__uuid', 'account__balance_type', 'tx_type', 'account__code', 'account__name', 'account__role', ] ANNOTATE = {'balance': Sum('amount')} ORDER_BY = ['account__uuid'] if by_unit: ORDER_BY.append('journal_entry__entity_unit__uuid') VALUES += ['journal_entry__entity_unit__uuid', 'journal_entry__entity_unit__name'] if by_period: ORDER_BY.append('journal_entry__timestamp') ANNOTATE['dt_idx'] = TruncMonth('journal_entry__timestamp') if by_activity: ORDER_BY.append('journal_entry__activity') VALUES.append('journal_entry__activity') if by_tx_type: ORDER_BY.append('tx_type') VALUES.append('tx_type') return txs_queryset.values(*VALUES).annotate(**ANNOTATE).order_by(*ORDER_BY) def python_digest(self, txs_queryset: Optional[QuerySet] = None, user_model: Optional[UserModel] = None, to_date: date = None, from_date: date = None, equity_only: bool = False, activity: str = None, entity_slug: str = None, unit_slug: str = None, role: Optional[Union[Set[str], List[str]]] = None, accounts: Optional[Union[Set[str], List[str]]] = None, signs: bool = False, by_unit: bool = False, by_activity: bool = False, by_tx_type: bool = False, by_period: bool = False, **kwargs) -> list or tuple: if equity_only: role = roles_module.GROUP_EARNINGS txs_queryset = self.database_digest( user_model=user_model, txs_queryset=txs_queryset, to_date=to_date, from_date=from_date, entity_slug=entity_slug, unit_slug=unit_slug, activity=activity, role=role, accounts=accounts, by_unit=by_unit, by_activity=by_activity, by_tx_type=by_tx_type, by_period=by_period, **kwargs) for tx_model in txs_queryset: if tx_model['account__balance_type'] != tx_model['tx_type']: tx_model['balance'] = -tx_model['balance'] # txs_list = list(txs_queryset) # txs_list.sort(key=lambda a: ( # a['account__uuid'], # str(a.get('journal_entry__entity_unit__uuid', '')) if by_unit else '', # a['dt_idx'].year if by_period else 0, # a['dt_idx'].month if by_period else 0, # str(a['journal_entry__activity']) if by_activity else None, # 
a['tx_type'] if by_tx_type else '', # )) accounts_gb_code = groupby(txs_queryset, key=lambda a: ( a['account__uuid'], a.get('journal_entry__entity_unit__uuid') if by_unit else None, a.get('dt_idx').year if by_period else None, a.get('dt_idx').month if by_period else None, a.get('journal_entry__activity') if by_activity else None, a.get('tx_type') if by_tx_type else None, )) gb_digest = [self.aggregate_balances(k, g) for k, g in accounts_gb_code] for acc in gb_digest: acc['balance_abs'] = abs(acc['balance']) if signs: TransactionModel = lazy_loader.get_txs_model() for acc in gb_digest: if any([ all([acc['role_bs'] == roles_module.BS_ASSET_ROLE, acc['balance_type'] == TransactionModel.CREDIT]), all([acc['role_bs'] in ( roles_module.BS_LIABILITIES_ROLE, roles_module.BS_EQUITY_ROLE ), acc['balance_type'] == TransactionModel.DEBIT]) ]): acc['balance'] = -acc['balance'] return txs_queryset, gb_digest @staticmethod def aggregate_balances(k, g): gl = list(g) return { 'account_uuid': k[0], 'unit_uuid': k[1], 'unit_name': gl[0].get('journal_entry__entity_unit__name'), 'activity': gl[0].get('journal_entry__activity'), 'period_year': k[2], 'period_month': k[3], 'role_bs': roles_module.BS_ROLES.get(gl[0]['account__role']), 'role': gl[0]['account__role'], 'code': gl[0]['account__code'], 'name': gl[0]['account__name'], 'balance_type': gl[0]['account__balance_type'], 'tx_type': k[5], 'balance': sum(a['balance'] for a in gl), } def digest(self, entity_slug: str = None, unit_slug: str = None, user_model: UserModel = None, txs_queryset: QuerySet = None, as_io_digest: bool = False, accounts: Optional[Union[Set[str], List[str]]] = None, role: Optional[Union[Set[str], List[str]]] = None, activity: str = None, signs: bool = True, to_date: Union[str, datetime, date] = None, from_date: Union[str, datetime, date] = None, process_roles: bool = False, process_groups: bool = False, process_ratios: bool = False, process_activity: bool = False, equity_only: bool = False, by_period: bool = False, by_unit: bool = False, by_activity: bool = False, by_tx_type: bool = False, digest_name: str = None, balance_sheet_statement: bool = False, income_statement: bool = False, cash_flow_statement: bool = False, **kwargs) -> Union[Tuple, IODigestContextManager]: if balance_sheet_statement: from_date = None if cash_flow_statement: by_activity = True if activity: activity = validate_activity(activity) if role: role = roles_module.validate_roles(role) from_date, to_date = validate_dates(from_date, to_date) io_data = defaultdict(lambda: dict()) io_data['io_model'] = self io_data['from_date'] = from_date io_data['to_date'] = to_date io_data['by_unit'] = by_unit io_data['by_period'] = by_period io_data['by_activity'] = by_activity io_data['by_tx_type'] = by_tx_type txs_qs, accounts_digest = self.python_digest( txs_queryset=txs_queryset, user_model=user_model, accounts=accounts, role=role, activity=activity, entity_slug=entity_slug, unit_slug=unit_slug, to_date=to_date, from_date=from_date, signs=signs, equity_only=equity_only, by_period=by_period, by_unit=by_unit, by_activity=by_activity, by_tx_type=by_tx_type, **kwargs ) io_data['txs_qs'] = txs_qs io_data['accounts'] = accounts_digest if process_roles:
roles_mgr = RoleContextManager(
4
2023-10-20 01:07:20+00:00
24k
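`diff_tx_data` in the record above enforces the double-entry invariant: credits and debits must net to zero, within a configured tolerance. The same check stripped down to plain dicts, with a stand-in constant replacing `settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE`:

from decimal import Decimal

MAX_TOLERANCE = Decimal("0.02")  # stand-in for the Django Ledger setting

def diff_tx_data(tx_data: list) -> tuple:
    credits = sum(t["amount"] for t in tx_data if t["tx_type"] == "credit")
    debits = sum(t["amount"] for t in tx_data if t["tx_type"] == "debit")
    diff = credits - debits
    is_valid = diff == 0
    if not is_valid and abs(diff) > MAX_TOLERANCE:
        # Mirrors the record's TransactionNotInBalanceError path.
        raise ValueError(f"credits {credits} != debits {debits}")
    return is_valid, diff

txs = [
    {"tx_type": "debit", "amount": Decimal("100.00")},
    {"tx_type": "credit", "amount": Decimal("100.01")},
]
print(diff_tx_data(txs))  # (False, Decimal('0.01')) -- off-balance but inside tolerance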
acolas1/KGSimple
simplify.py
[ { "identifier": "FluencyScorer", "path": "scoring/fluency_scorer.py", "snippet": "class FluencyScorer:\n def __init__(self, batch_size=1, reduce=\"mean\", log=True, laplace_smooth=False, prob_dict_path=None):\n self.device = \"cuda:1\" if torch.cuda.is_available() else \"cpu\"\n self.ba...
import os import json import numpy as np import pandas as pd import torch import random from collections import defaultdict from transformers import BartTokenizer, T5Tokenizer from transformers import AdamW, get_linear_schedule_with_warmup from utils import * from scoring.fluency_scorer import FluencyScorer from scoring.saliency_scorer import SaliencyBERTScore from scoring.simplicity_scorer import SimplicityTextScore from scoring.guardrails import * from scoring.aggregate_scorer import ScorerWrapper from GAP.data_relations_as_nodes import GAPDataloader, EventDataset, WebNLGDataset from GAP.data_relations_as_nodes import evaluate_bleu, get_t_emb_dim from tqdm import tqdm, trange from rake_nltk import Rake from evaluate import load from sentence_similarity import sentence_similarity from GAP.modeling_gap_type import GAPBartForConditionalGeneration as GAP_Type_model from GAP.modeling_gap import GAPBartForConditionalGeneration as GAP_model
21,357
# import yake bertscore = load("bertscore") ## sentence model for merge phrase_model = sentence_similarity(model_name='distilbert-base-uncased',embedding_type='cls_token_embedding') ## for sentence checking ner_check = NERInaccuracyPenalty() def run(args, logger): #load in model for graph-to-text and tokenizer checkpoint = args.model_path tokenizer_path = args.tokenizer_path tokenizer = BartTokenizer.from_pretrained(tokenizer_path) n_gpu = torch.cuda.device_count() if n_gpu > 0: torch.cuda.manual_seed_all(args.seed) if args.type_encoding: t_emb_dim = get_t_emb_dim(args) model = GAP_Type_model.from_pretrained(checkpoint,t_emb_dim=t_emb_dim) else:
# import yake bertscore = load("bertscore") ## sentence model for merge phrase_model = sentence_similarity(model_name='distilbert-base-uncased',embedding_type='cls_token_embedding') ## for sentence checking ner_check = NERInaccuracyPenalty() def run(args, logger): #load in model for graph-to-text and tokenizer checkpoint = args.model_path tokenizer_path = args.tokenizer_path tokenizer = BartTokenizer.from_pretrained(tokenizer_path) n_gpu = torch.cuda.device_count() if n_gpu > 0: torch.cuda.manual_seed_all(args.seed) if args.type_encoding: t_emb_dim = get_t_emb_dim(args) model = GAP_Type_model.from_pretrained(checkpoint,t_emb_dim=t_emb_dim) else:
model = GAP_model.from_pretrained(checkpoint)
8
2023-10-24 13:24:23+00:00
24k
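The KGSimple record scores candidate simplifications with HuggingFace's evaluate library, creating the metric handle exactly as `load("bertscore")` above. Minimal usage of that metric, with toy sentences:

from evaluate import load

bertscore = load("bertscore")

predictions = ["The cat sat on the mat."]
references = ["A cat was sitting on the mat."]

# lang="en" picks a default backbone; the model downloads on first call.
results = bertscore.compute(predictions=predictions, references=references, lang="en")
print(results["f1"])  # one F1 score per prediction/reference pair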
S-LoRA/S-LoRA
slora/server/router/manager.py
[ { "identifier": "SamplingParams", "path": "slora/server/sampling_params.py", "snippet": "class SamplingParams:\n\n def __init__(\n self,\n do_sample: bool = False,\n presence_penalty: float = 0.0,\n frequency_penalty: float = 0.0,\n temperature: float = 1.0,\n ...
import uvloop import asyncio import os import pickle import time import torch import zmq import zmq.asyncio import traceback from typing import Dict, List, Optional from ..sampling_params import SamplingParams from ..io_struct import Req, Batch, BatchAbortReq from .model_infer.model_rpc import start_model_process, ModelRpcClient from .req_queue import ReqQueue from rpyc.utils.classic import obtain from slora.utils.infer_utils import calculate_time from ..io_struct import BatchTokenIdOut, AbortReq from .stats import Stats from slora.server.input_params import InputParams from slora.models.peft.lora_adapter import get_lora_config from slora.server.router.profiler import AlphaModel, BetaModel from slora.server.router.abort_req_queue import AbortReqQueue from slora.server.router.cluster_req_queue import ClusterReqQueue from slora.server.router.vtc_req_queue import VTCReqQueue from slora.server.router.pets_req_queue import PETSReqQueue from slora.server.router.peft_req_queue import PEFTReqQueue
15,413
self.has_wait_tokens += 1 return else: new_mini_batch = self.req_queue.generate_new_batch(self.running_batch, self.lora_ranks) if self.input_params.enable_abort and len(self.req_queue.abort_req_list) > 0: self.send_to_detokenization.send_pyobj(BatchAbortReq(self.req_queue.abort_req_list)) self.req_queue.reset_abort_list() if new_mini_batch is not None: self.stats_tool.count_prompt_tokens(new_mini_batch) if not self.input_params.no_lora: ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].load_adapters(new_mini_batch.adapter_dirs)) await asyncio.gather(*ret) await self._prefill_batch(new_mini_batch, minibatch=True) if not new_mini_batch.is_clear(): await self._merge_batch(self.running_batch, new_mini_batch) self.running_batch.merge(new_mini_batch) self.has_wait_tokens = 0 else: self.stats_tool.count_output_tokens(self.running_batch) await self._decode_batch(self.running_batch) await self._filter_runing_batch() async def _init_batch(self, batch: Batch): reqs = [r.to_rpc_obj() for r in batch.reqs] rets = [self.model_rpcs[tp_rank].init_batch(batch.batch_id, reqs) for tp_rank in range(self.world_size)] await asyncio.gather(*rets) return async def _prefill_batch(self, batch, minibatch=True): await self._init_batch(batch) rets = [self.model_rpcs[tp_rank].prefill_batch(batch.batch_id) for tp_rank in range(self.world_size)] ans = await asyncio.gather(*rets) if self.world_size != 1: req_to_out_token_id = obtain(ans[0]) else: req_to_out_token_id = ans[0] self._add_token_id_to_req(batch, req_to_out_token_id) has_new_finished_req = batch.mark_finished_req(self.eos_id) self._send_to_detokenization_proc(batch, req_to_out_token_id) await self._handle_finish_req(batch, has_new_finished_req, minibatch=True) return async def _decode_batch(self, batch:Batch): self.req_queue.update_counter(batch) rets = [self.model_rpcs[tp_rank].decode_batch(batch.batch_id) for tp_rank in range(self.world_size)] ans = await asyncio.gather(*rets) if self.world_size != 1: req_to_out_token_id = obtain(ans[0]) else: req_to_out_token_id = ans[0] self._add_token_id_to_req(batch, req_to_out_token_id) has_new_finished_req = batch.mark_finished_req(self.eos_id) self._send_to_detokenization_proc(batch, req_to_out_token_id) await self._handle_finish_req(batch, has_new_finished_req) return async def _filter_batch(self, batch: Batch): req_id_list = [r.request_id for r in batch.reqs] rets = [self.model_rpcs[tp_rank].filter_batch(batch.batch_id, req_id_list) for tp_rank in range(self.world_size)] await asyncio.gather(*rets) return async def _merge_batch(self, batch1, batch2): rets = [self.model_rpcs[tp_rank].merge_batch(batch1.batch_id, batch2.batch_id) for tp_rank in range(self.world_size)] await asyncio.gather(*rets) return async def _remove_batch(self, batch): rets = [self.model_rpcs[tp_rank].remove_batch(batch.batch_id) for tp_rank in range(self.world_size)] await asyncio.gather(*rets) return async def _handle_finish_req(self, batch: Batch, has_new_finished_req, minibatch=False): if has_new_finished_req: batch.filter_finished() # unmerge adapter from base model if self.input_params.scheduler == "peft" and batch.is_clear(): ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].unmerge_adapter()) await asyncio.gather(*ret) if not minibatch and not self.input_params.no_lora: ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].offload_adapters(batch.adapter_dirs)) await asyncio.gather(*ret) if batch.is_clear(): await self._remove_batch(batch) else: 
await self._filter_batch(batch) return async def _filter_runing_batch(self): if self.running_batch is not None and self.running_batch.is_clear(): if not self.input_params.no_lora: # offload model and adapters ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].offload_adapters()) await asyncio.gather(*ret) self.running_batch = None return def _add_token_id_to_req(self, batch: Batch, req_ans): for req_id, (new_token_id, new_gen_metadata) in req_ans.items(): req = batch.id_to_reqs[req_id] req.output_ids.append(new_token_id) req.output_metadata_list.append(new_gen_metadata) return def _send_to_detokenization_proc(self, batch: Batch, req_ans):
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) def get_scheduler(input_params, adapter_dirs): if input_params.scheduler == "vtc_fair": return VTCReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size, adapter_dirs, input_params.fair_weights) elif input_params.scheduler == "pets": return PETSReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) elif input_params.scheduler == "peft": return PEFTReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) elif input_params.batch_num_adapters is not None: return ClusterReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size, input_params.batch_num_adapters) elif input_params.enable_abort: return AbortReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) elif input_params.scheduler == "slora": return ReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) else: raise Exception("unrecognized scheduler") class RouterManager: def __init__(self, weightdir, adapter_dirs, load_way, world_size, eos_id, router_port, detokenization_port, model_rpc_ports, input_params, mode=[], log_stats=True, log_stats_interval=10): self.model_weightdir = weightdir self.adapter_dirs = adapter_dirs self.world_size = world_size self.load_way = load_way self.mode = mode self.input_params = input_params if self.input_params.prefetch: self.prefetch_stream = torch.cuda.Stream() else: self.prefetch_stream = None # get adapter rank self.lora_ranks = {} for lora_dir in adapter_dirs: config, _ = get_lora_config(lora_dir, input_params.dummy) self.lora_ranks[lora_dir] = config["r"] self.lora_ranks[None] = 0 self.req_queue = get_scheduler(input_params, adapter_dirs) self.running_batch: Batch = None self.eos_id = eos_id self.has_wait_tokens = 0 self.max_wait_tokens = 10 context = zmq.asyncio.Context(2) self.recv_from_httpserver = context.socket(zmq.PULL) self.recv_from_httpserver.bind(f"tcp://127.0.0.1:{router_port}") self.send_to_detokenization = context.socket(zmq.PUSH) self.send_to_detokenization.connect(f"tcp://127.0.0.1:{detokenization_port}") self.model_rpc_ports = model_rpc_ports self.stats_tool = Stats(log_stats, log_stats_interval) async def wait_to_model_ready(self): self.model_rpcs: List[ModelRpcClient] = [] for rank_id in range(self.world_size): rpc_model = await start_model_process(port=self.model_rpc_ports[rank_id], world_size=self.world_size) self.model_rpcs.append(rpc_model) init_model_ret = [] for rank_id in range(self.world_size): # async init model process init_model_ret.append( self.model_rpcs[rank_id].init_model( rank_id, self.world_size, self.model_weightdir, self.adapter_dirs, self.input_params.max_total_token_num, self.load_way, self.mode, input_params=self.input_params, prefetch_stream=self.prefetch_stream, )) await asyncio.gather(*init_model_ret) return async def profile_prefill(self): res = [] for rank_id in range(self.world_size): # async init model process res.append( self.model_rpcs[rank_id].profile_prefill()) results = await asyncio.gather(*res) self.alpha_model = AlphaModel(results[0]) self.beta_model = BetaModel(results[0]) # check if the path exists else create it cache_dir = os.path.expanduser("~/.cache/slora") if not os.path.exists(cache_dir): os.makedirs(cache_dir) with open(cache_dir+"/profile_results.pkl", "wb") as f: 
pickle.dump(results[0], f) return def add_req( self, adapter_dir: str, prompt_ids: List[int], sampling_params: SamplingParams, request_id: str ): req = Req(adapter_dir, request_id, prompt_ids, sampling_params) self.req_queue.append(req) self.send_to_detokenization.send_pyobj(req.to_req_detokenization_state()) return async def abort(self, request_id): if self.running_batch is not None: for req in self.running_batch.reqs: if req.request_id == request_id: req.has_generate_finished = True req.aborted = True for req in self.req_queue.waiting_req_list: if req.request_id == request_id: req.has_generate_finished = True req.aborted = True return async def loop_for_fwd(self,): counter_count = 0 while True: await self._step() counter_count += 1 if self.running_batch is not None: if counter_count % 50 == 0: print("current batch size:", len(self.running_batch.reqs), "token used ratio:", self.running_batch.calcu_used_tokens() / self.input_params.max_total_token_num) pass self.stats_tool.print_stats() if self.running_batch is None: await asyncio.sleep(0.01) # 10ms async def _step(self): """ Event-handling loop """ # remove all reqs that have already finished if self.running_batch is None: new_batch = self.req_queue.generate_new_batch(self.running_batch, self.lora_ranks) if self.input_params.enable_abort and len(self.req_queue.abort_req_list) > 0: self.send_to_detokenization.send_pyobj(BatchAbortReq(self.req_queue.abort_req_list)) self.req_queue.reset_abort_list() if new_batch is not None: self.stats_tool.count_prompt_tokens(new_batch) self.running_batch = new_batch if not self.input_params.no_lora: # load adapters ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].load_adapters(new_batch.adapter_dirs)) await asyncio.gather(*ret) # merge adapter to base model if self.input_params.scheduler == "peft": torch.cuda.synchronize() ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].merge_adapter()) await asyncio.gather(*ret) torch.cuda.synchronize() await self._prefill_batch(self.running_batch) await self._filter_runing_batch() self.has_wait_tokens = 0 return if self.has_wait_tokens < self.max_wait_tokens: self.stats_tool.count_output_tokens(self.running_batch) # prefetch if (not self.input_params.no_lora and self.input_params.prefetch and (self.has_wait_tokens == self.max_wait_tokens // 2 or self.has_wait_tokens == self.max_wait_tokens - 3) and self.input_params.scheduler != "peft"): next_batch = self.req_queue.next_batch() if next_batch is not None: ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].load_adapters( next_batch.adapter_dirs, prefetch=True)) await asyncio.gather(*ret) await self._decode_batch(self.running_batch) await self._filter_runing_batch() self.has_wait_tokens += 1 return else: new_mini_batch = self.req_queue.generate_new_batch(self.running_batch, self.lora_ranks) if self.input_params.enable_abort and len(self.req_queue.abort_req_list) > 0: self.send_to_detokenization.send_pyobj(BatchAbortReq(self.req_queue.abort_req_list)) self.req_queue.reset_abort_list() if new_mini_batch is not None: self.stats_tool.count_prompt_tokens(new_mini_batch) if not self.input_params.no_lora: ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].load_adapters(new_mini_batch.adapter_dirs)) await asyncio.gather(*ret) await self._prefill_batch(new_mini_batch, minibatch=True) if not new_mini_batch.is_clear(): await self._merge_batch(self.running_batch, new_mini_batch) self.running_batch.merge(new_mini_batch)
self.has_wait_tokens = 0 else: self.stats_tool.count_output_tokens(self.running_batch) await self._decode_batch(self.running_batch) await self._filter_runing_batch() async def _init_batch(self, batch: Batch): reqs = [r.to_rpc_obj() for r in batch.reqs] rets = [self.model_rpcs[tp_rank].init_batch(batch.batch_id, reqs) for tp_rank in range(self.world_size)] await asyncio.gather(*rets) return async def _prefill_batch(self, batch, minibatch=True): await self._init_batch(batch) rets = [self.model_rpcs[tp_rank].prefill_batch(batch.batch_id) for tp_rank in range(self.world_size)] ans = await asyncio.gather(*rets) if self.world_size != 1: req_to_out_token_id = obtain(ans[0]) else: req_to_out_token_id = ans[0] self._add_token_id_to_req(batch, req_to_out_token_id) has_new_finished_req = batch.mark_finished_req(self.eos_id) self._send_to_detokenization_proc(batch, req_to_out_token_id) await self._handle_finish_req(batch, has_new_finished_req, minibatch=True) return async def _decode_batch(self, batch:Batch): self.req_queue.update_counter(batch) rets = [self.model_rpcs[tp_rank].decode_batch(batch.batch_id) for tp_rank in range(self.world_size)] ans = await asyncio.gather(*rets) if self.world_size != 1: req_to_out_token_id = obtain(ans[0]) else: req_to_out_token_id = ans[0] self._add_token_id_to_req(batch, req_to_out_token_id) has_new_finished_req = batch.mark_finished_req(self.eos_id) self._send_to_detokenization_proc(batch, req_to_out_token_id) await self._handle_finish_req(batch, has_new_finished_req) return async def _filter_batch(self, batch: Batch): req_id_list = [r.request_id for r in batch.reqs] rets = [self.model_rpcs[tp_rank].filter_batch(batch.batch_id, req_id_list) for tp_rank in range(self.world_size)] await asyncio.gather(*rets) return async def _merge_batch(self, batch1, batch2): rets = [self.model_rpcs[tp_rank].merge_batch(batch1.batch_id, batch2.batch_id) for tp_rank in range(self.world_size)] await asyncio.gather(*rets) return async def _remove_batch(self, batch): rets = [self.model_rpcs[tp_rank].remove_batch(batch.batch_id) for tp_rank in range(self.world_size)] await asyncio.gather(*rets) return async def _handle_finish_req(self, batch: Batch, has_new_finished_req, minibatch=False): if has_new_finished_req: batch.filter_finished() # unmerge adapter from base model if self.input_params.scheduler == "peft" and batch.is_clear(): ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].unmerge_adapter()) await asyncio.gather(*ret) if not minibatch and not self.input_params.no_lora: ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].offload_adapters(batch.adapter_dirs)) await asyncio.gather(*ret) if batch.is_clear(): await self._remove_batch(batch) else: await self._filter_batch(batch) return async def _filter_runing_batch(self): if self.running_batch is not None and self.running_batch.is_clear(): if not self.input_params.no_lora: # offload model and adapters ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].offload_adapters()) await asyncio.gather(*ret) self.running_batch = None return def _add_token_id_to_req(self, batch: Batch, req_ans): for req_id, (new_token_id, new_gen_metadata) in req_ans.items(): req = batch.id_to_reqs[req_id] req.output_ids.append(new_token_id) req.output_metadata_list.append(new_gen_metadata) return def _send_to_detokenization_proc(self, batch: Batch, req_ans):
batch_out = BatchTokenIdOut()
8
2023-11-05 04:08:36+00:00
24k
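The S-LoRA router repeats one fan-out shape throughout: build one RPC coroutine per tensor-parallel rank, then `await asyncio.gather(*rets)` so all ranks advance in lockstep. The pattern in isolation, with a sleep standing in for the real rpyc model call:

import asyncio

async def rpc_call(rank: int, op: str) -> str:
    # Stand-in for a per-rank model RPC; the record sends these over rpyc.
    await asyncio.sleep(0.01)
    return f"rank {rank}: {op} done"

async def broadcast(world_size: int, op: str) -> list:
    # Same shape as the record's loops: one coroutine per tp_rank, awaited together.
    rets = [rpc_call(tp_rank, op) for tp_rank in range(world_size)]
    return await asyncio.gather(*rets)

print(asyncio.run(broadcast(world_size=2, op="load_adapters")))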
ForceFledgling/proxyhub
proxyhub/api.py
[ { "identifier": "Checker", "path": "proxyhub/checker.py", "snippet": "class Checker:\n \"\"\"Proxy checker.\"\"\"\n\n def __init__(\n self,\n judges,\n max_tries=3,\n timeout=8,\n verify_ssl=False,\n strict=False,\n dnsbl=None,\n real_ext_ip=...
import asyncio import io import signal import warnings from collections import Counter, defaultdict from functools import partial from pprint import pprint from .checker import Checker from .errors import ResolveError from .providers import PROVIDERS, Provider from .proxy import Proxy from .resolver import Resolver from .server import Server from .utils import IPPortPatternLine, log
14,841
request. If not specified, it will use the value specified during the creation of the :class:`Broker` object. Attempts can be made with different proxies. The default value is 3 :param int strategy: (optional) The strategy used for picking proxy from pool. The default value is 'best' :param int min_queue: (optional) The minimum number of proxies to choose from before deciding which is the most suitable to use. The default value is 5 :param int min_req_proxy: (optional) The minimum number of processed requests to estimate the quality of proxy (in accordance with :attr:`max_error_rate` and :attr:`max_resp_time`). The default value is 5 :param int max_error_rate: (optional) The maximum percentage of requests that ended with an error. For example: 0.5 = 50%. If proxy.error_rate exceeds this value, proxy will be removed from the pool. The default value is 0.5 :param int max_resp_time: (optional) The maximum response time in seconds. If proxy.avg_resp_time exceeds this value, proxy will be removed from the pool. The default value is 8 :param bool prefer_connect: (optional) Flag that indicates whether to use the CONNECT method if possible. For example: If is set to True and a proxy supports HTTP proto (GET or POST requests) and CONNECT method, the server will try to use CONNECT method and only after that send the original request. The default value is False :param list http_allowed_codes: (optional) Acceptable HTTP codes returned by proxy on requests. If a proxy return code, not included in this list, it will be considered as a proxy error, not a wrong/unavailable address. For example, if a proxy will return a ``404 Not Found`` response - this will be considered as an error of a proxy. Checks only for HTTP protocol, HTTPS not supported at the moment. By default the list is empty and the response code is not verified :param int backlog: (optional) The maximum number of queued connections passed to listen. The default value is 100 :raises ValueError: If :attr:`limit` is less than or equal to zero. Because a parsing of providers will be endless .. versionadded:: 0.2.0 """ if limit <= 0: raise ValueError( 'In serve mode value of the limit cannot be less than or ' 'equal to zero. Otherwise, a parsing of providers will be ' 'endless' ) self._server = Server( host=host, port=port, proxies=self._proxies, timeout=self._timeout, max_tries=kwargs.pop('max_tries', self._max_tries), loop=self._loop, **kwargs, ) self._server.start() task = asyncio.ensure_future(self.find(limit=limit, **kwargs)) self._all_tasks.append(task) async def _load(self, data, check=True): """Looking for proxies in the passed data. 
Transform the passed data from [raw string | file-like object | list] to set {(host, port), ...}: {('192.168.0.1', '80'), } """ log.debug('Load proxies from the raw data') if isinstance(data, io.TextIOWrapper): data = data.read() if isinstance(data, str): data = IPPortPatternLine.findall(data) proxies = set(data) for proxy in proxies: await self._handle(proxy, check=check) await self._on_check.join() self._done() async def _grab(self, types=None, check=False): def _get_tasks(by=MAX_CONCURRENT_PROVIDERS): providers = [ pr for pr in self._providers if not types or not pr.proto or bool(pr.proto & types.keys()) ] while providers: tasks = [ asyncio.ensure_future(pr.get_proxies()) for pr in providers[:by] ] del providers[:by] self._all_tasks.extend(tasks) yield tasks log.debug('Start grabbing proxies') while True: for tasks in _get_tasks(): for task in asyncio.as_completed(tasks): proxies = await task for proxy in proxies: await self._handle(proxy, check=check) log.debug('Grab cycle is complete') if self._server: log.debug('fall asleep for %d seconds' % GRAB_PAUSE) await asyncio.sleep(GRAB_PAUSE) log.debug('awaked') else: break await self._on_check.join() self._done() async def _handle(self, proxy, check=False): try:
# Pause between grabbing cycles; in seconds. GRAB_PAUSE = 180 # The maximum number of providers that are parsed concurrently MAX_CONCURRENT_PROVIDERS = 3 class Broker: """The Broker. | One broker to rule them all, one broker to find them, | One broker to bring them all and in the darkness bind them. :param asyncio.Queue queue: (optional) Queue of found/checked proxies :param int timeout: (optional) Timeout of a request in seconds :param int max_conn: (optional) The maximum number of concurrent checks of proxies :param int max_tries: (optional) The maximum number of attempts to check a proxy :param list judges: (optional) Urls of pages that show HTTP headers and IP address. Or :class:`~proxyhub.judge.Judge` objects :param list providers: (optional) Urls of pages where to find proxies. Or :class:`~proxyhub.providers.Provider` objects :param bool verify_ssl: (optional) Flag indicating whether to check the SSL certificates. Set to True to check ssl certifications :param loop: (optional) asyncio compatible event loop :param stop_broker_on_sigint: (optional) whether set SIGINT signal on broker object. Useful for a thread other than main thread. .. deprecated:: 0.2.0 Use :attr:`max_conn` and :attr:`max_tries` instead of :attr:`max_concurrent_conn` and :attr:`attempts_conn`. """ def __init__( self, queue=None, timeout=8, max_conn=200, max_tries=3, judges=None, providers=None, verify_ssl=False, loop=None, stop_broker_on_sigint=True, **kwargs, ): self._loop = loop or asyncio.get_event_loop_policy().get_event_loop() self._proxies = queue or asyncio.Queue() self._resolver = Resolver(loop=self._loop) self._timeout = timeout self._verify_ssl = verify_ssl self.unique_proxies = {} self._all_tasks = [] self._checker = None self._server = None self._limit = 0 # not limited self._countries = None max_concurrent_conn = kwargs.get('max_concurrent_conn') if max_concurrent_conn: warnings.warn( '`max_concurrent_conn` is deprecated, use `max_conn` instead', DeprecationWarning, ) if isinstance(max_concurrent_conn, asyncio.Semaphore): max_conn = max_concurrent_conn._value else: max_conn = max_concurrent_conn attempts_conn = kwargs.get('attempts_conn') if attempts_conn: warnings.warn( '`attempts_conn` is deprecated, use `max_tries` instead', DeprecationWarning, ) max_tries = attempts_conn # The maximum number of concurrent checking proxies self._on_check = asyncio.Queue(maxsize=max_conn) self._max_tries = max_tries self._judges = judges self._providers = [ p if isinstance(p, Provider) else Provider(p) for p in (providers or PROVIDERS) ] if stop_broker_on_sigint: try: self._loop.add_signal_handler(signal.SIGINT, self.stop) # add_signal_handler() is not implemented on Win # https://docs.python.org/3.5/library/asyncio-eventloops.html#windows except NotImplementedError: pass async def grab(self, *, countries=None, limit=0): """Gather proxies from the providers without checking. :param list countries: (optional) List of ISO country codes where should be located proxies :param int limit: (optional) The maximum number of proxies :ref:`Example of usage <proxyhub-examples-grab>`. """ self._countries = countries self._limit = limit task = asyncio.ensure_future(self._grab(check=False)) self._all_tasks.append(task) async def find( self, *, types=None, data=None, countries=None, post=False, strict=False, dnsbl=None, limit=0, **kwargs, ): """Gather and check proxies from providers or from a passed data. :ref:`Example of usage <proxyhub-examples-find>`. :param list types: Types (protocols) that need to be check on support by proxy. 
Supported: HTTP, HTTPS, SOCKS4, SOCKS5, CONNECT:80, CONNECT:25 And levels of anonymity (HTTP only): Transparent, Anonymous, High :param data: (optional) String or list with proxies. Can also be a file-like object that supports the `read()` method. Used instead of providers :param list countries: (optional) List of ISO country codes where proxies should be located :param bool post: (optional) Flag indicating whether to use POST instead of GET for requests when checking proxies :param bool strict: (optional) Flag indicating that anonymity levels of types (protocols) supported by a proxy must be equal to the requested types and levels of anonymity. By default, strict mode is off and satisfying any one of the requested types is enough for a successful check :param list dnsbl: (optional) Spam databases for proxy checking. `Wiki <https://en.wikipedia.org/wiki/DNSBL>`_ :param int limit: (optional) The maximum number of proxies :raises ValueError: If :attr:`types` is not given. .. versionchanged:: 0.2.0 Added: :attr:`post`, :attr:`strict`, :attr:`dnsbl`. Changed: :attr:`types` is required. """ ip = await self._resolver.get_real_ext_ip() types = _update_types(types) if not types: raise ValueError('`types` is required') self._checker = Checker( judges=self._judges, timeout=self._timeout, verify_ssl=self._verify_ssl, max_tries=self._max_tries, real_ext_ip=ip, types=types, post=post, strict=strict, dnsbl=dnsbl, loop=self._loop, ) self._countries = countries self._limit = limit tasks = [asyncio.ensure_future(self._checker.check_judges())] if data: task = asyncio.ensure_future(self._load(data, check=True)) else: task = asyncio.ensure_future(self._grab(types, check=True)) tasks.append(task) self._all_tasks.extend(tasks) def serve(self, host='127.0.0.1', port=8888, limit=100, **kwargs): """Start a local proxy server. The server distributes incoming requests to a pool of found proxies. When the server receives an incoming request, it chooses the optimal proxy (based on the percentage of errors and average response time) and passes the incoming request to it. In addition to the parameters listed below, this method also accepts all the parameters of the :meth:`.find` method and passes them on to gather proxies into the pool. :ref:`Example of usage <proxyhub-examples-server>`. :param str host: (optional) Host of the local proxy server :param int port: (optional) Port of the local proxy server :param int limit: (optional) When the requested number of working proxies has been found, checking of new proxies is lazily paused. Checking is resumed if all the found proxies are discarded in the process of working with them (see :attr:`max_error_rate`, :attr:`max_resp_time`), and continues until one more working proxy is found, after which it pauses again. The default value is 100 :param int max_tries: (optional) The maximum number of attempts to handle an incoming request. If not specified, it will use the value specified during the creation of the :class:`Broker` object. Attempts can be made with different proxies. The default value is 3 :param int strategy: (optional) The strategy used for picking a proxy from the pool. The default value is 'best' :param int min_queue: (optional) The minimum number of proxies to choose from before deciding which is the most suitable to use. The default value is 5 :param int min_req_proxy: (optional) The minimum number of processed requests needed to estimate the quality of a proxy (in accordance with :attr:`max_error_rate` and :attr:`max_resp_time`).
The default value is 5 :param int max_error_rate: (optional) The maximum percentage of requests that ended with an error. For example: 0.5 = 50%. If proxy.error_rate exceeds this value, the proxy will be removed from the pool. The default value is 0.5 :param int max_resp_time: (optional) The maximum response time in seconds. If proxy.avg_resp_time exceeds this value, the proxy will be removed from the pool. The default value is 8 :param bool prefer_connect: (optional) Flag that indicates whether to use the CONNECT method if possible. For example: if set to True and a proxy supports the HTTP proto (GET or POST requests) as well as the CONNECT method, the server will try the CONNECT method first and only then send the original request. The default value is False :param list http_allowed_codes: (optional) Acceptable HTTP codes returned by a proxy on requests. If a proxy returns a code not included in this list, it will be considered a proxy error, not a wrong/unavailable address. For example, if a proxy returns a ``404 Not Found`` response, this will be considered an error of the proxy. Checked only for the HTTP protocol; HTTPS is not supported at the moment. By default the list is empty and the response code is not verified :param int backlog: (optional) The maximum number of queued connections passed to listen. The default value is 100 :raises ValueError: If :attr:`limit` is less than or equal to zero, because parsing of providers would then be endless .. versionadded:: 0.2.0 """ if limit <= 0: raise ValueError( 'In serve mode the value of the limit cannot be less than or ' 'equal to zero. Otherwise, parsing of providers will be ' 'endless' ) self._server = Server( host=host, port=port, proxies=self._proxies, timeout=self._timeout, max_tries=kwargs.pop('max_tries', self._max_tries), loop=self._loop, **kwargs, ) self._server.start() task = asyncio.ensure_future(self.find(limit=limit, **kwargs)) self._all_tasks.append(task) async def _load(self, data, check=True): """Look for proxies in the passed data. Transforms the passed data from [raw string | file-like object | list] to a set {(host, port), ...}: {('192.168.0.1', '80'), } """ log.debug('Load proxies from the raw data') if isinstance(data, io.TextIOWrapper): data = data.read() if isinstance(data, str): data = IPPortPatternLine.findall(data) proxies = set(data) for proxy in proxies: await self._handle(proxy, check=check) await self._on_check.join() self._done() async def _grab(self, types=None, check=False): def _get_tasks(by=MAX_CONCURRENT_PROVIDERS): providers = [ pr for pr in self._providers if not types or not pr.proto or bool(pr.proto & types.keys()) ] while providers: tasks = [ asyncio.ensure_future(pr.get_proxies()) for pr in providers[:by] ] del providers[:by] self._all_tasks.extend(tasks) yield tasks log.debug('Start grabbing proxies') while True: for tasks in _get_tasks(): for task in asyncio.as_completed(tasks): proxies = await task for proxy in proxies: await self._handle(proxy, check=check) log.debug('Grab cycle is complete') if self._server: log.debug('falling asleep for %d seconds' % GRAB_PAUSE) await asyncio.sleep(GRAB_PAUSE) log.debug('awoke') else: break await self._on_check.join() self._done() async def _handle(self, proxy, check=False): try:
proxy = await Proxy.create(
4
2023-11-05 13:28:57+00:00
24k
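Before the next record, it is worth seeing how the pieces of this `Broker` fit together: `find()` only schedules the grabbing and checking futures, so the queue passed into the constructor is what a caller actually consumes. Below is a minimal usage sketch in the style of the upstream ProxyBroker README; the top-level `proxyhub` import path and the `None` sentinel that `_done()` (whose body is not shown in this record) presumably puts on the queue are assumptions, not facts established above.

import asyncio

from proxyhub import Broker  # assumed import path; only the class body is shown in this record


async def show(proxies):
    while True:
        proxy = await proxies.get()
        if proxy is None:  # assumed sentinel from _done(), which is not shown above
            break
        print('Found proxy: %s' % proxy)


async def main():
    proxies = asyncio.Queue()
    broker = Broker(proxies, timeout=8, max_conn=200, max_tries=3)
    await asyncio.gather(
        broker.find(types=['HTTP', 'HTTPS'], limit=10),  # `types` is required, per the docstring
        show(proxies),
    )


asyncio.run(main())

Since `find()` returns as soon as the futures are scheduled, it is the consumer coroutine that keeps the program alive until the requested limit is reached.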
TheFunny/ArisuAutoSweeper
module/webui/app.py
[ { "identifier": "AzurLaneConfig", "path": "module/config/config.py", "snippet": "class AzurLaneConfig(ConfigUpdater, ManualConfig, GeneratedConfig, ConfigWatcher):\n stop_event: threading.Event = None\n bound = {}\n\n # Class property\n is_hoarding_task = True\n\n def __setattr__(self, ke...
import argparse import queue import threading import time import module.webui.lang as lang from datetime import datetime from functools import partial from typing import Dict, List, Optional from pywebio import config as webconfig from pywebio.output import ( Output, clear, close_popup, popup, put_button, put_buttons, put_collapse, put_column, put_error, put_html, put_link, put_loading, put_markdown, put_row, put_scope, put_table, put_text, put_warning, toast, use_scope, ) from pywebio.pin import pin, pin_on_change from pywebio.session import go_app, info, local, register_thread, run_js, set_env from module.config.config import AzurLaneConfig, Function from module.config.utils import ( alas_instance, alas_template, deep_get, deep_iter, deep_set, dict_to_kv, filepath_args, filepath_config, read_file, ) from module.logger import logger from module.webui.base import Frame from module.webui.fake import ( get_config_mod, load_config, ) from module.webui.fastapi import asgi_app from module.webui.lang import _t, t from module.webui.pin import put_input, put_select from module.webui.process_manager import ProcessManager from module.webui.remote_access import RemoteAccess from module.webui.setting import State from module.webui.updater import updater from module.webui.utils import ( Icon, Switch, TaskHandler, add_css, filepath_css, get_alas_config_listen_path, get_localstorage, get_window_visibility_state, login, parse_pin_value, raise_exception, re_fullmatch, ) from module.webui.widgets import ( BinarySwitchButton, RichLog, T_Output_Kwargs, put_icon_buttons, put_loading_text, put_none, put_output, )
15,235
else: name = arg color = arg_dict.get("color", "#777777") nodata = t("Gui.Dashboard.NoData") def set_value(dic): if "total" in dic.get("attrs", []) and config.get("total") is not None: return [ put_text(config.get("value", nodata)).style("--dashboard-value--"), put_text(f' / {config.get("total", "")}').style("--dashboard-time--"), ] else: return [ put_text(config.get("value", nodata)).style("--dashboard-value--"), ] with use_scope(f"dashboard-row-{arg}", clear=True): put_html(f'<div><div class="dashboard-icon" style="background-color:{color}"></div>'), put_scope(f"dashboard-content-{arg}", [ put_scope(f"dashboard-value-{arg}", set_value(arg_dict)), put_scope(f"dashboard-time-{arg}", [ put_text(f"{name} - {lang.readable_time(config.get('time', ''))}").style("--dashboard-time--"), ]) ]) @use_scope("content", clear=True) def alas_overview(self) -> None: self.init_menu(name="Overview") self.set_title(t(f"Gui.MenuAlas.Overview")) put_scope("overview", [put_scope("schedulers"), put_scope("logs")]) with use_scope("schedulers"): put_scope( "scheduler-bar", [ put_text(t("Gui.Overview.Scheduler")).style( "font-size: 1.25rem; margin: auto .5rem auto;" ), put_scope("scheduler_btn"), ], ) put_scope( "running", [ put_text(t("Gui.Overview.Running")), put_html('<hr class="hr-group">'), put_scope("running_tasks"), ], ) put_scope( "pending", [ put_text(t("Gui.Overview.Pending")), put_html('<hr class="hr-group">'), put_scope("pending_tasks"), ], ) put_scope( "waiting", [ put_text(t("Gui.Overview.Waiting")), put_html('<hr class="hr-group">'), put_scope("waiting_tasks"), ], ) switch_scheduler = BinarySwitchButton( label_on=t("Gui.Button.Stop"), label_off=t("Gui.Button.Start"), onclick_on=lambda: self.alas.stop(), onclick_off=lambda: self.alas.start(None, updater.event), get_state=lambda: self.alas.alive, color_on="off", color_off="on", scope="scheduler_btn", ) log = RichLog("log") with use_scope("logs"): put_scope("log-bar", [ put_scope("log-title", [ put_text(t("Gui.Overview.Log")).style("font-size: 1.25rem; margin: auto .5rem auto;"), put_scope("log-title-btns", [ put_scope("log_scroll_btn"), ]), ]), put_html('<hr class="hr-group">'), put_scope("dashboard", [ # Empty dashboard, values will be updated in alas_update_overview_task() put_scope(f"dashboard-row-{arg}", []) for arg in self.ALAS_STORED.keys() if deep_get(self.ALAS_STORED, keys=[arg, "order"], default=0) # Empty content to left-align last row ] + [put_html("<i></i>")] * min(len(self.ALAS_STORED), 4)) ]) put_scope("log", [put_html("")]) log.console.width = log.get_width() switch_log_scroll = BinarySwitchButton( label_on=t("Gui.Button.ScrollON"), label_off=t("Gui.Button.ScrollOFF"), onclick_on=lambda: log.set_scroll(False), onclick_off=lambda: log.set_scroll(True), get_state=lambda: log.keep_bottom, color_on="on", color_off="off", scope="log_scroll_btn", ) self.task_handler.add(switch_scheduler.g(), 1, True) self.task_handler.add(switch_log_scroll.g(), 1, True) self.task_handler.add(self.alas_update_overview_task, 10, True) self.task_handler.add(log.put_log(self.alas), 0.25, True) def _init_alas_config_watcher(self) -> None: def put_queue(path, value): self.modified_config_queue.put({"name": path, "value": value})
task_handler = TaskHandler() class AlasGUI(Frame): ALAS_MENU: Dict[str, Dict[str, List[str]]] ALAS_ARGS: Dict[str, Dict[str, Dict[str, Dict[str, str]]]] ALAS_STORED: Dict[str, Dict[str, Dict[str, str]]] theme = "default" def initial(self) -> None: self.ALAS_MENU = read_file(filepath_args("menu", self.alas_mod)) self.ALAS_ARGS = read_file(filepath_args("args", self.alas_mod)) self.ALAS_STORED = read_file(filepath_args("stored", self.alas_mod)) self._init_alas_config_watcher() def __init__(self) -> None: super().__init__() # modified keys, return values of pin_wait_change() self.modified_config_queue = queue.Queue() # alas config name self.alas_name = "" self.alas_mod = "alas" self.alas_config = AzurLaneConfig("template") self.initial() @use_scope("aside", clear=True) def set_aside(self) -> None: # TODO: update put_icon_buttons() put_icon_buttons( Icon.DEVELOP, buttons=[ {"label": t("Gui.Aside.Home"), "value": "Home", "color": "aside"} ], onclick=[self.ui_develop], ), for name in alas_instance(): put_icon_buttons( Icon.RUN, buttons=[{"label": name, "value": name, "color": "aside"}], onclick=self.ui_alas, ) put_icon_buttons( Icon.ADD, buttons=[ {"label": t("Gui.Aside.AddAlas"), "value": "AddAlas", "color": "aside"} ], onclick=[self.ui_add_alas], ), @use_scope("header_status") def set_status(self, state: int) -> None: """ Args: state (int): 1 (running) 2 (not running) 3 (warning, stop unexpectedly) 4 (stop for update) 0 (hide) -1 (*state not changed) """ if state == -1: return clear() if state == 1: put_loading_text(t("Gui.Status.Running"), color="success") elif state == 2: put_loading_text(t("Gui.Status.Inactive"), color="secondary", fill=True) elif state == 3: put_loading_text(t("Gui.Status.Warning"), shape="grow", color="warning") elif state == 4: put_loading_text(t("Gui.Status.Updating"), shape="grow", color="success") @classmethod def set_theme(cls, theme="default") -> None: cls.theme = theme State.deploy_config.Theme = theme State.theme = theme webconfig(theme=theme) @use_scope("menu", clear=True) def alas_set_menu(self) -> None: """ Set menu """ put_buttons( [{ "label": t("Gui.MenuAlas.Overview"), "value": "Overview", "color": "menu", }], onclick=[self.alas_overview], ).style(f"--menu-Overview--") for menu, task_data in self.ALAS_MENU.items(): if task_data.get("page") == "tool": _onclick = self.alas_daemon_overview else: _onclick = self.alas_set_group if task_data.get("menu") == "collapse": task_btn_list = [ put_buttons( [{ "label": t(f"Task.{task}.name"), "value": task, "color": "menu", }], onclick=_onclick, ).style(f"--menu-{task}--") for task in task_data.get("tasks", []) ] put_collapse(title=t(f"Menu.{menu}.name"), content=task_btn_list) else: title = t(f"Menu.{menu}.name") put_html('<div class="hr-task-group-box">' '<span class="hr-task-group-line"></span>' f'<span class="hr-task-group-text">{title}</span>' '<span class="hr-task-group-line"></span>' '</div>' ) for task in task_data.get("tasks", []): put_buttons( [{ "label": t(f"Task.{task}.name"), "value": task, "color": "menu", }], onclick=_onclick, ).style(f"--menu-{task}--").style(f"padding-left: 0.75rem") self.alas_overview() @use_scope("content", clear=True) def alas_set_group(self, task: str) -> None: """ Set arg groups from dict """ self.init_menu(name=task) self.set_title(t(f"Task.{task}.name")) put_scope("_groups", [put_none(), put_scope("groups"), put_scope("navigator")]) task_help: str = t(f"Task.{task}.help") if task_help: put_scope( "group__info", scope="groups", content=[put_text(task_help).style("font-size: 1rem")], 
) config = self.alas_config.read_file(self.alas_name) for group, arg_dict in deep_iter(self.ALAS_ARGS[task], depth=1): if self.set_group(group, arg_dict, config, task): self.set_navigator(group) @use_scope("groups") def set_group(self, group, arg_dict, config, task): group_name = group[0] output_list: List[Output] = [] for arg, arg_dict in deep_iter(arg_dict, depth=1): output_kwargs: T_Output_Kwargs = arg_dict.copy() # Skip hide display: Optional[str] = output_kwargs.pop("display", None) if display == "hide": continue # Disable elif display == "disabled": output_kwargs["disabled"] = True # Output type output_kwargs["widget_type"] = output_kwargs.pop("type") arg_name = arg[0] # [arg_name,] # Internal pin widget name output_kwargs["name"] = f"{task}_{group_name}_{arg_name}" # Display title output_kwargs["title"] = t(f"{group_name}.{arg_name}.name") # Get value from config value = deep_get( config, [task, group_name, arg_name], output_kwargs["value"] ) # idk value = str(value) if isinstance(value, datetime) else value # Default value output_kwargs["value"] = value # Options output_kwargs["options"] = options = output_kwargs.pop("option", []) # Options label options_label = [] for opt in options: options_label.append(t(f"{group_name}.{arg_name}.{opt}")) output_kwargs["options_label"] = options_label # Help arg_help = t(f"{group_name}.{arg_name}.help") if arg_help == "" or not arg_help: arg_help = None output_kwargs["help"] = arg_help # Invalid feedback output_kwargs["invalid_feedback"] = t("Gui.Text.InvalidFeedBack", value) o = put_output(output_kwargs) if o is not None: # output will inherit current scope when created, override here o.spec["scope"] = f"#pywebio-scope-group_{group_name}" output_list.append(o) if not output_list: return 0 with use_scope(f"group_{group_name}"): put_text(t(f"{group_name}._info.name")) group_help = t(f"{group_name}._info.help") if group_help != "": put_text(group_help) put_html('<hr class="hr-group">') for output in output_list: output.show() return len(output_list) @use_scope("navigator") def set_navigator(self, group): js = f""" $("#pywebio-scope-groups").scrollTop( $("#pywebio-scope-group_{group[0]}").position().top + $("#pywebio-scope-groups").scrollTop() - 59 ) """ put_button( label=t(f"{group[0]}._info.name"), onclick=lambda: run_js(js), color="navigator", ) def set_dashboard(self, arg, arg_dict, config): i18n = arg_dict.get('i18n') if i18n: name = t(i18n) else: name = arg color = arg_dict.get("color", "#777777") nodata = t("Gui.Dashboard.NoData") def set_value(dic): if "total" in dic.get("attrs", []) and config.get("total") is not None: return [ put_text(config.get("value", nodata)).style("--dashboard-value--"), put_text(f' / {config.get("total", "")}').style("--dashboard-time--"), ] else: return [ put_text(config.get("value", nodata)).style("--dashboard-value--"), ] with use_scope(f"dashboard-row-{arg}", clear=True): put_html(f'<div><div class="dashboard-icon" style="background-color:{color}"></div>'), put_scope(f"dashboard-content-{arg}", [ put_scope(f"dashboard-value-{arg}", set_value(arg_dict)), put_scope(f"dashboard-time-{arg}", [ put_text(f"{name} - {lang.readable_time(config.get('time', ''))}").style("--dashboard-time--"), ]) ]) @use_scope("content", clear=True) def alas_overview(self) -> None: self.init_menu(name="Overview") self.set_title(t(f"Gui.MenuAlas.Overview")) put_scope("overview", [put_scope("schedulers"), put_scope("logs")]) with use_scope("schedulers"): put_scope( "scheduler-bar", [ put_text(t("Gui.Overview.Scheduler")).style( "font-size: 
1.25rem; margin: auto .5rem auto;" ), put_scope("scheduler_btn"), ], ) put_scope( "running", [ put_text(t("Gui.Overview.Running")), put_html('<hr class="hr-group">'), put_scope("running_tasks"), ], ) put_scope( "pending", [ put_text(t("Gui.Overview.Pending")), put_html('<hr class="hr-group">'), put_scope("pending_tasks"), ], ) put_scope( "waiting", [ put_text(t("Gui.Overview.Waiting")), put_html('<hr class="hr-group">'), put_scope("waiting_tasks"), ], ) switch_scheduler = BinarySwitchButton( label_on=t("Gui.Button.Stop"), label_off=t("Gui.Button.Start"), onclick_on=lambda: self.alas.stop(), onclick_off=lambda: self.alas.start(None, updater.event), get_state=lambda: self.alas.alive, color_on="off", color_off="on", scope="scheduler_btn", ) log = RichLog("log") with use_scope("logs"): put_scope("log-bar", [ put_scope("log-title", [ put_text(t("Gui.Overview.Log")).style("font-size: 1.25rem; margin: auto .5rem auto;"), put_scope("log-title-btns", [ put_scope("log_scroll_btn"), ]), ]), put_html('<hr class="hr-group">'), put_scope("dashboard", [ # Empty dashboard, values will be updated in alas_update_overview_task() put_scope(f"dashboard-row-{arg}", []) for arg in self.ALAS_STORED.keys() if deep_get(self.ALAS_STORED, keys=[arg, "order"], default=0) # Empty content to left-align last row ] + [put_html("<i></i>")] * min(len(self.ALAS_STORED), 4)) ]) put_scope("log", [put_html("")]) log.console.width = log.get_width() switch_log_scroll = BinarySwitchButton( label_on=t("Gui.Button.ScrollON"), label_off=t("Gui.Button.ScrollOFF"), onclick_on=lambda: log.set_scroll(False), onclick_off=lambda: log.set_scroll(True), get_state=lambda: log.keep_bottom, color_on="on", color_off="off", scope="log_scroll_btn", ) self.task_handler.add(switch_scheduler.g(), 1, True) self.task_handler.add(switch_log_scroll.g(), 1, True) self.task_handler.add(self.alas_update_overview_task, 10, True) self.task_handler.add(log.put_log(self.alas), 0.25, True) def _init_alas_config_watcher(self) -> None: def put_queue(path, value): self.modified_config_queue.put({"name": path, "value": value})
for path in get_alas_config_listen_path(self.ALAS_ARGS):
29
2023-11-01 07:09:45+00:00
24k
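Closing out this record: `_init_alas_config_watcher` above funnels every UI edit into `modified_config_queue` as `{'name': path, 'value': value}` dicts. The consuming side is not shown, so here is a self-contained sketch of one plausible drain strategy; the batching interval and the dict-merging behavior are assumptions for illustration, not the project's actual logic.

import queue


def drain_modified(q, idle=0.5):
    """Coalesce queued {'name': path, 'value': v} edits into one batch.

    Blocks until the first edit arrives, then keeps draining until the
    queue has been quiet for `idle` seconds, so a burst of rapid UI
    changes results in a single config write instead of many.
    """
    item = q.get()  # wait for at least one edit
    batch = {item["name"]: item["value"]}
    while True:
        try:
            item = q.get(timeout=idle)
        except queue.Empty:
            return batch
        batch[item["name"]] = item["value"]  # later edits to the same path win


# Example: two rapid edits collapse into one batch.
q = queue.Queue()
q.put({"name": "Task.Group.Arg", "value": 1})
q.put({"name": "Task.Group.Other", "value": "x"})
print(drain_modified(q))  # {'Task.Group.Arg': 1, 'Task.Group.Other': 'x'}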
radekd91/inferno
inferno/models/DECA.py
[ { "identifier": "EmoNetLoss", "path": "inferno/layers/losses/EmoNetLoss.py", "snippet": "class EmoNetLoss(EmoLossBase):\n# class EmoNetLoss(object):\n\n def __init__(self, device, emonet=None, trainable=False, normalize_features=False, emo_feat_loss=None, au_loss=None):\n if emonet is None:\n ...
import os, sys import torch import torchvision import torch.nn.functional as F import torchvision.transforms.functional as F_v import numpy as np import cv2 import inferno.layers.losses.DecaLosses as lossfunc import inferno.layers.losses.MediaPipeLandmarkLosses as lossfunc_mp import inferno.utils.DecaUtils as util import pytorch_lightning.plugins.environments.lightning_environment as le import psutil import adabound import copy from pytorch_lightning import LightningModule from pytorch_lightning.loggers import WandbLogger from inferno.layers.losses.EmoNetLoss import EmoNetLoss, create_emo_loss, create_au_loss from skimage.io import imread from skimage.transform import resize from pathlib import Path from inferno.models.Renderer import SRenderY from inferno.models.DecaEncoder import ResnetEncoder, SecondHeadResnet, SwinEncoder from inferno.models.DecaDecoder import Generator, GeneratorAdaIn from inferno.models.DecaFLAME import FLAME, FLAMETex, FLAME_mediapipe from inferno.models.EmotionMLP import EmotionMLP from inferno.datasets.AffWild2Dataset import Expression7 from inferno.datasets.AffectNetDataModule import AffectNetExpressions from inferno.utils.lightning_logging import _log_array_image, _log_wandb_image, _torch_image2np from enum import Enum from inferno.utils.other import class_from_str, get_path_to_assets from inferno.layers.losses.VGGLoss import VGG19Loss from omegaconf import OmegaConf, open_dict from inferno.models.temporal.external.LipReadingLoss import LipReadingLoss from .StarGAN import StarGANWrapper from inferno.models.EmoNetRegressor import EmoNetRegressor, EmonetRegressorStatic from .mica.config import get_cfg_defaults from .mica.mica import MICA from .mica.MicaInputProcessing import MicaInputProcessor from inferno.utils.other import get_path_to_assets from inferno.models.IO import locate_checkpoint
19,546
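The next record crops `inferno`'s DECA implementation right at its detail stage, whose opening lines displace every UV texel of the coarse geometry image along its normal: the coarse position plus the predicted scalar `uv_z` times the normal, plus a fixed learned offset times the normal. Here is a shape-only sketch of that step, with toy sizes and random tensors standing in for the model's real buffers:

import torch
import torch.nn.functional as F

B, H, W = 2, 8, 8  # toy batch size and UV resolution, illustrative only
uv_coarse_vertices = torch.randn(B, 3, H, W)                     # xyz per UV texel
uv_coarse_normals = F.normalize(torch.randn(B, 3, H, W), dim=1)  # unit normal per texel
uv_z = 0.01 * torch.randn(B, 1, H, W)                            # predicted scalar displacement
fixed_uv_dis = torch.zeros(H, W)                                 # learned static offset in the real model

# coarse position + (dynamic + static) displacement along the normal
uv_detail_vertices = (uv_coarse_vertices
                      + uv_z * uv_coarse_normals
                      + fixed_uv_dis[None, None] * uv_coarse_normals)

# [B, 3, H, W] -> [B, H*W, 3], the flat layout a vertex-normal routine expects
dense_vertices = uv_detail_vertices.permute(0, 2, 3, 1).reshape(B, -1, 3)
print(dense_vertices.shape)  # torch.Size([2, 64, 3])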
uv_detail_vertices = uv_coarse_vertices + \ uv_z * uv_coarse_normals + \ self.fixed_uv_dis[None, None, :,:] * uv_coarse_normals #.detach() dense_vertices = uv_detail_vertices.permute(0, 2, 3, 1).reshape([batch_size, -1, 3]) uv_detail_normals = util.vertex_normals(dense_vertices, self.render.dense_faces.expand(batch_size, -1, -1)) uv_detail_normals = uv_detail_normals.reshape( [batch_size, uv_coarse_vertices.shape[2], uv_coarse_vertices.shape[3], 3]).permute(0, 3, 1, 2) # uv_detail_normals = uv_detail_normals*self.uv_face_eye_mask + uv_coarse_normals*(1-self.uv_face_eye_mask) # uv_detail_normals = util.gaussian_blur(uv_detail_normals) return uv_detail_normals, uv_coarse_vertices def visualize(self, visdict, savepath, catdim=1): grids = {} for key in visdict: # print(key) if visdict[key] is None: continue grids[key] = torchvision.utils.make_grid( F.interpolate(visdict[key], [self.config.image_size, self.config.image_size])).detach().cpu() grid = torch.cat(list(grids.values()), catdim) grid_image = (grid.numpy().transpose(1, 2, 0).copy() * 255)[:, :, [2, 1, 0]] grid_image = np.minimum(np.maximum(grid_image, 0), 255).astype(np.uint8) if savepath is not None: cv2.imwrite(savepath, grid_image) return grid_image def create_mesh(self, opdict, dense_template): ''' vertices: [nv, 3], tensor texture: [3, h, w], tensor ''' i = 0 vertices = opdict['verts'][i].cpu().numpy() faces = self.render.faces[0].cpu().numpy() if 'uv_texture_gt' in opdict.keys(): texture = util.tensor2image(opdict['uv_texture_gt'][i]) else: texture = None uvcoords = self.render.raw_uvcoords[0].cpu().numpy() uvfaces = self.render.uvfaces[0].cpu().numpy() # save coarse mesh, with texture and normal map if 'uv_detail_normals' in opdict.keys(): normal_map = util.tensor2image(opdict['uv_detail_normals'][i]*0.5 + 0.5) # upsample mesh, save detailed mesh texture = texture[:, :, [2, 1, 0]] normals = opdict['normals'][i].cpu().numpy() displacement_map = opdict['displacement_map'][i].detach().cpu().numpy().squeeze() dense_vertices, dense_colors, dense_faces = util.upsample_mesh(vertices, normals, faces, displacement_map, texture, dense_template) else: normal_map = None dense_vertices = None dense_colors = None dense_faces = None return vertices, faces, texture, uvcoords, uvfaces, normal_map, dense_vertices, dense_faces, dense_colors def save_obj(self, filename, opdict, dense_template, mode='detail'): if mode not in ['coarse', 'detail', 'both']: raise ValueError(f"Invalid mode '{mode}'. Expected modes are: 'coarse', 'detail', 'both'") vertices, faces, texture, uvcoords, uvfaces, normal_map, dense_vertices, dense_faces, dense_colors \ = self.create_mesh(opdict, dense_template) if mode == 'both': if isinstance(filename, list): filename_coarse = filename[0] filename_detail = filename[1] else: filename_coarse = filename filename_detail = filename.replace('.obj', '_detail.obj') elif mode == 'coarse': filename_coarse = filename else: filename_detail = filename if mode in ['coarse', 'both']: util.write_obj(str(filename_coarse), vertices, faces, texture=texture, uvcoords=uvcoords, uvfaces=uvfaces, normal_map=normal_map) if mode in ['detail', 'both']: util.write_obj(str(filename_detail), dense_vertices, dense_faces, colors=dense_colors, inverse_face_order=True) class ExpDECAInterface(object): """ This serves as an interface for EMOCA-like classes that need to use a different subclass but retain the EMOCA functionality. See EMICA_v2 for an example.
""" def _create_model(self): # E_flame should be fixed for expression EMOCA self.E_flame.requires_grad_(False) # 2) add expression decoder if self.config.expression_backbone == 'deca_parallel': ## a) Attach a parallel flow of FCs onto the original DECA coarse backbone. (Only the second FC head is trainable) self.E_expression = SecondHeadResnet(self.E_flame, self.n_exp_param, 'same') elif self.config.expression_backbone == 'deca_clone': ## b) Clones the original DECA coarse decoder (and the entire decoder will be trainable) - This is in final EMOCA. #TODO this will only work for Resnet. Make this work for the other backbones (Swin) as well. self.E_expression = ResnetEncoder(self.n_exp_param) # clone parameters of the ResNet self.E_expression.encoder.load_state_dict(self.E_flame.encoder.state_dict()) elif self.config.expression_backbone == 'emonet_trainable': # Trainable EmoNet instead of Resnet (deprecated) self.E_expression = EmoNetRegressor(self.n_exp_param) elif self.config.expression_backbone == 'emonet_static': # Frozen EmoNet with a trainable head instead of Resnet (deprecated)
""" Author: Radek Danecek Copyright (c) 2022, Radek Danecek All rights reserved. # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is # holder of all proprietary rights on this computer program. # Using this computer program means that you agree to the terms # in the LICENSE file included with this software distribution. # Any use not explicitly granted by the LICENSE is prohibited. # # Copyright©2022 Max-Planck-Gesellschaft zur Förderung # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute # for Intelligent Systems. All rights reserved. # # For comments or questions, please email us at emoca@tue.mpg.de # For commercial licensing contact, please contact ps-license@tuebingen.mpg.de Parts of the code were adapted from the original DECA release: https://github.com/YadiraF/DECA/ """ # from time import time torch.backends.cudnn.benchmark = True class DecaMode(Enum): COARSE = 1 # when switched on, only coarse part of DECA-based networks is used DETAIL = 2 # when switched on, only coarse and detail part of DECA-based networks is used class DecaModule(LightningModule): """ DecaModule is a PL module that implements DECA-inspired face reconstruction networks. """ def __init__(self, model_params, learning_params, inout_params, stage_name = ""): """ :param model_params: a DictConfig of parameters about the model itself :param learning_params: a DictConfig of parameters corresponding to the learning process (such as optimizer, lr and others) :param inout_params: a DictConfig of parameters about input and output (where checkpoints and visualizations are saved) """ super().__init__() self.learning_params = learning_params self.inout_params = inout_params # detail conditioning - what is given as the conditioning input to the detail generator in detail stage training if 'detail_conditioning' not in model_params.keys(): # jaw, expression and detail code by default self.detail_conditioning = ['jawpose', 'expression', 'detail'] OmegaConf.set_struct(model_params, True) with open_dict(model_params): model_params.detail_conditioning = self.detail_conditioning else: self.detail_conditioning = model_params.detail_conditioning # deprecated and is not used if 'detailemo_conditioning' not in model_params.keys(): self.detailemo_conditioning = [] OmegaConf.set_struct(model_params, True) with open_dict(model_params): model_params.detailemo_conditioning = self.detailemo_conditioning else: self.detailemo_conditioning = model_params.detailemo_conditioning supported_conditioning_keys = ['identity', 'jawpose', 'expression', 'detail', 'detailemo'] for c in self.detail_conditioning: if c not in supported_conditioning_keys: raise ValueError(f"Conditioning on '{c}' is not supported. Supported conditionings: {supported_conditioning_keys}") for c in self.detailemo_conditioning: if c not in supported_conditioning_keys: raise ValueError(f"Conditioning on '{c}' is not supported. Supported conditionings: {supported_conditioning_keys}") # which type of DECA network is used if 'deca_class' not in model_params.keys() or model_params.deca_class is None: print(f"Deca class is not specified. 
Defaulting to {DECA.__name__}") # vanilla DECA by default (not EMOCA) deca_class = DECA else: # other type of DECA-inspired networks possible (such as ExpDECA, which is what EMOCA is) deca_class = class_from_str(model_params.deca_class, sys.modules[__name__]) # instantiate the network self.deca = deca_class(config=model_params) self.mode = DecaMode[str(model_params.mode).upper()] self.stage_name = stage_name if self.stage_name is None: self.stage_name = "" if len(self.stage_name) > 0: self.stage_name += "_" # initialize the emotion perceptual loss (used for EMOCA supervision) self.emonet_loss = None self._init_emotion_loss() # initialize the au perceptual loss (not currently used in EMOCA) self.au_loss = None self._init_au_loss() # initialize the lip reading perceptual loss (not currently used in original EMOCA) self.lipread_loss = None self._init_lipread_loss() # MLP regressor from the encoded space to emotion labels (not used in EMOCA but could be used for direct emotion supervision) if 'mlp_emotion_predictor' in self.deca.config.keys(): # self._build_emotion_mlp(self.deca.config.mlp_emotion_predictor) self.emotion_mlp = EmotionMLP(self.deca.config.mlp_emotion_predictor, model_params) else: self.emotion_mlp = None def get_input_image_size(self): return (self.deca.config.image_size, self.deca.config.image_size) def _instantiate_deca(self, model_params): """ Instantiate the DECA network. """ # which type of DECA network is used if 'deca_class' not in model_params.keys() or model_params.deca_class is None: print(f"Deca class is not specified. Defaulting to {DECA.__name__}") # vanilla DECA by default (not EMOCA) deca_class = DECA else: # other type of DECA-inspired networks possible (such as ExpDECA, which is what EMOCA is) deca_class = class_from_str(model_params.deca_class, sys.modules[__name__]) # instantiate the network self.deca = deca_class(config=model_params) def _init_emotion_loss(self): """ Initialize the emotion perceptual loss (used for EMOCA supervision) """ if 'emonet_weight' in self.deca.config.keys() and bool(self.deca.config.get('emonet_model_path', False)): if self.emonet_loss is not None: emoloss_force_override = True if 'emoloss_force_override' in self.deca.config.keys() and self.deca.config.emoloss_force_override else False if self.emonet_loss.is_trainable(): if not emoloss_force_override: print("The old emonet loss is trainable and will not be overridden or replaced.") return # raise NotImplementedError("The old emonet loss was trainable. Changing a trainable loss is probably not " # "what you want implicitly. If you need this, use the `emoloss_force_override` config.") else: print("The old emonet loss is trainable but override is set so it will be replaced.") else: print("The old emonet loss is not trainable.
It will be replaced.") if 'emonet_model_path' in self.deca.config.keys(): emonet_model_path = self.deca.config.emonet_model_path else: emonet_model_path = None # self.emonet_loss = EmoNetLoss(self.device, emonet=emonet_model_path) emoloss_trainable = True if 'emoloss_trainable' in self.deca.config.keys() and self.deca.config.emoloss_trainable else False emoloss_dual = True if 'emoloss_dual' in self.deca.config.keys() and self.deca.config.emoloss_dual else False normalize_features = self.deca.config.normalize_features if 'normalize_features' in self.deca.config.keys() else None emo_feat_loss = self.deca.config.emo_feat_loss if 'emo_feat_loss' in self.deca.config.keys() else None old_emonet_loss = self.emonet_loss self.emonet_loss = create_emo_loss(self.device, emoloss=emonet_model_path, trainable=emoloss_trainable, dual=emoloss_dual, normalize_features=normalize_features, emo_feat_loss=emo_feat_loss) if old_emonet_loss is not None and type(old_emonet_loss) != type(self.emonet_loss): print(f"The old emonet loss {old_emonet_loss.__class__.__name__} is replaced during reconfiguration by " f"new emotion loss {self.emonet_loss.__class__.__name__}") else: self.emonet_loss = None def _init_au_loss(self): """ Initialize the au perceptual loss (not currently used in EMOCA) """ if 'au_loss' in self.deca.config.keys(): if self.au_loss is not None: force_override = True if 'force_override' in self.deca.config.au_loss.keys() \ and self.deca.config.au_loss.force_override else False if self.au_loss.is_trainable(): if not force_override: print("The old AU loss is trainable and will not be overridden or replaced.") return # raise NotImplementedError("The old AU loss was trainable. Changing a trainable loss is probably not " # "what you want implicitly. If you need this, use the `force_override` config.") else: print("The old AU loss is trainable but override is set so it will be replaced.") else: print("The old AU loss is not trainable. It will be replaced.") old_au_loss = self.au_loss self.au_loss = create_au_loss(self.device, self.deca.config.au_loss) else: self.au_loss = None def _init_lipread_loss(self): """ Initialize the lip reading perceptual loss (not currently used in original EMOCA) """ if 'lipread_loss' in self.deca.config.keys() and self.deca.config.lipread_loss.get('load', True): if self.lipread_loss is not None: force_override = True if 'force_override' in self.deca.config.lipread_loss.keys() \ and self.deca.config.lipread_loss.force_override else False assert not self.lipread_loss.is_trainable(), "Trainable lip reading loss is not supported yet." if self.lipread_loss.is_trainable(): if not force_override: print("The old lip reading loss is trainable and will not be overridden or replaced.") return # raise NotImplementedError("The old lip reading loss was trainable. Changing a trainable loss is probably not " # "what you want implicitly. If you need this, use the `force_override` config.") else: print("The old lip reading loss is trainable but override is set so it will be replaced.") else: print("The old lip reading loss is not trainable. It will be replaced.") # old_lipread_loss = self.lipread_loss self.lipread_loss = LipReadingLoss(self.device, self.deca.config.lipread_loss.lipread_loss) self.lipread_loss.eval() self.lipread_loss.requires_grad_(False) else: self.lipread_loss = None def reconfigure(self, model_params, inout_params, learning_params, stage_name="", downgrade_ok=False, train=True): """ Reconfigure the model.
Usually used to switch between detail and coarse stages (which have separate configs) """ if (self.mode == DecaMode.DETAIL and model_params.mode != DecaMode.DETAIL) and not downgrade_ok: raise RuntimeError("You're switching the EMOCA mode from DETAIL to COARSE. Is this really what you want?!") self.inout_params = inout_params self.learning_params = learning_params if self.deca.__class__.__name__ != model_params.deca_class: old_deca_class = self.deca.__class__.__name__ state_dict = self.deca.state_dict() if 'deca_class' in model_params.keys(): deca_class = class_from_str(model_params.deca_class, sys.modules[__name__]) else: deca_class = DECA self.deca = deca_class(config=model_params) diff = set(state_dict.keys()).difference(set(self.deca.state_dict().keys())) if len(diff) > 0: raise RuntimeError(f"Some values from the old state dict will not be used. This is probably not what you " f"want because it most likely means that the pretrained model's weights won't be used. " f"Maybe you messed up backbone compatibility (e.g. SWIN vs ResNet?) {diff}") ret = self.deca.load_state_dict(state_dict, strict=False) if len(ret.unexpected_keys) > 0: raise RuntimeError(f"Unexpected keys: {ret.unexpected_keys}") missing_modules = set([s.split(".")[0] for s in ret.missing_keys]) print(f"Missing modules when upgrading from {old_deca_class} to {model_params.deca_class}:") print(missing_modules) else: self.deca._reconfigure(model_params) self._init_emotion_loss() self._init_au_loss() self.stage_name = stage_name if self.stage_name is None: self.stage_name = "" if len(self.stage_name) > 0: self.stage_name += "_" self.mode = DecaMode[str(model_params.mode).upper()] self.train(mode=train) print(f"EMOCA MODE RECONFIGURED TO: {self.mode}") if 'shape_constrain_type' in self.deca.config.keys() and str(self.deca.config.shape_constrain_type).lower() != 'none': shape_constraint = self.deca.config.shape_constrain_type else: shape_constraint = None if 'expression_constrain_type' in self.deca.config.keys() and str(self.deca.config.expression_constrain_type).lower() != 'none': expression_constraint = self.deca.config.expression_constrain_type else: expression_constraint = None if shape_constraint is not None and expression_constraint is not None: raise ValueError("Both shape constraint and expression constraint are active.
This is probably not what we want.") def uses_texture(self): """ Check if the model uses texture """ return self.deca.uses_texture() def visualize(self, visdict, savepath, catdim=1): return self.deca.visualize(visdict, savepath, catdim) def train(self, mode: bool = True): # super().train(mode) # not necessary self.deca.train(mode) if self.emotion_mlp is not None: self.emotion_mlp.train(mode) if self.emonet_loss is not None: self.emonet_loss.eval() if self.deca.perceptual_loss is not None: self.deca.perceptual_loss.eval() if self.deca.id_loss is not None: self.deca.id_loss.eval() return self def to(self, *args, **kwargs): super().to(*args, **kwargs) return self def cuda(self, device=None): super().cuda(device) return self def cpu(self): super().cpu() return self def forward(self, batch): values = self.encode(batch, training=False) values = self.decode(values, training=False) return values def _unwrap_list(self, codelist): shapecode, texcode, expcode, posecode, cam, lightcode = codelist return shapecode, texcode, expcode, posecode, cam, lightcode def _unwrap_list_to_dict(self, codelist): shapecode, texcode, expcode, posecode, cam, lightcode = codelist return {'shape': shapecode, 'tex': texcode, 'exp': expcode, 'pose': posecode, 'cam': cam, 'light': lightcode} # return shapecode, texcode, expcode, posecode, cam, lightcode def _encode_flame(self, images, **kwargs): if self.mode == DecaMode.COARSE or \ (self.mode == DecaMode.DETAIL and self.deca.config.train_coarse): # forward pass with gradients (for coarse stage (used), or detail stage with coarse training (not used)) parameters = self.deca._encode_flame(images, **kwargs) elif self.mode == DecaMode.DETAIL: # in detail stage, the coarse forward pass does not need gradients with torch.no_grad(): parameters = self.deca._encode_flame(images, **kwargs) else: raise ValueError(f"Invalid EMOCA Mode {self.mode}") code_list, original_code = self.deca.decompose_code(parameters) # shapecode, texcode, expcode, posecode, cam, lightcode = code_list # return shapecode, texcode, expcode, posecode, cam, lightcode, original_code return code_list, original_code def _expression_ring_exchange(self, original_batch_size, K, expcode, posecode, shapecode, lightcode, texcode, images, cam, lmk, masks, va, expr7, affectnetexp, detailcode=None, detailemocode=None, exprw=None, lmk_mp=None, mica_images=None): """ Deprecated. Expression ring exchange is not used in EMOCA (nor DECA). 
""" new_order = np.array([np.random.permutation(K) + i * K for i in range(original_batch_size)]) new_order = new_order.flatten() expcode_new = expcode[new_order] ## append new shape code data expcode = torch.cat([expcode, expcode_new], dim=0) texcode = torch.cat([texcode, texcode], dim=0) shapecode = torch.cat([shapecode, shapecode], dim=0) globpose = posecode[..., :3] jawpose = posecode[..., 3:] if self.deca.config.expression_constrain_use_jaw_pose: jawpose_new = jawpose[new_order] jawpose = torch.cat([jawpose, jawpose_new], dim=0) else: jawpose = torch.cat([jawpose, jawpose], dim=0) if self.deca.config.expression_constrain_use_global_pose: globpose_new = globpose[new_order] globpose = torch.cat([globpose, globpose_new], dim=0) else: globpose = torch.cat([globpose, globpose], dim=0) if self.deca.config.expression_constrain_use_jaw_pose or self.deca.config.expression_constrain_use_global_pose: posecode = torch.cat([globpose, jawpose], dim=-1) # posecode_new = torch.cat([globpose, jawpose], dim=-1) else: # posecode_new = posecode # posecode_new = posecode posecode = torch.cat([posecode, posecode], dim=0) cam = torch.cat([cam, cam], dim=0) lightcode = torch.cat([lightcode, lightcode], dim=0) ## append gt images = torch.cat([images, images], dim=0) # images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = torch.cat([mica_images, mica_images], dim=0) lmk = torch.cat([lmk, lmk], dim=0) # lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) if lmk_mp is not None: lmk_mp = torch.cat([lmk_mp, lmk_mp], dim=0) masks = torch.cat([masks, masks], dim=0) # NOTE: # Here we could think about what makes sense to exchange # 1) Do we exchange all emotion GT (VA and expression) within the ring? # 2) Do we exchange only the GT on which the ring is constructed (AffectNet ring based on binned VA or expression or Emonet feature?) # note: if we use EmoMLP that goes from (expression, jawpose, detailcode) -> (v,a,expr) and we exchange # ALL of these, the EmoMLP prediction will of course be the same. The output image still changes, # so EmoNet loss (if used) would be different. Same for the photometric/landmark losses. # TODO: # For now I decided to exchange everything but this should probably be experimented with # I would argue though, that exchanging the GT is the right thing to do if va is not None: va = torch.cat([va, va[new_order]], dim=0) if expr7 is not None: expr7 = torch.cat([expr7, expr7[new_order]], dim=0) if affectnetexp is not None: affectnetexp = torch.cat([affectnetexp, affectnetexp[new_order]], dim=0) if exprw is not None: exprw = torch.cat([exprw, exprw[new_order]], dim=0) if detailcode is not None: #TODO: to exchange or not to exchange, that is the question, the answer is probably NO detailcode = torch.cat([detailcode, detailcode], dim=0) # detailcode = torch.cat([detailcode, detailcode[new_order]], dim=0) if detailemocode is not None: # TODO: to exchange or not to exchange, that is the question, the answer is probably YES detailemocode = torch.cat([detailemocode, detailemocode[new_order]], dim=0) return expcode, posecode, shapecode, lightcode, texcode, images, cam, lmk, masks, va, expr7, affectnetexp, \ detailcode, detailemocode, exprw, lmk_mp, mica_images # return expcode, posecode, shapecode, lightcode, texcode, images, cam, lmk, masks, va, expr7 def encode(self, batch, training=True) -> dict: """ Forward encoding pass of the model. Takes a batch of images and returns the corresponding latent codes for each image. 
:param batch: Batch of images to encode. batch['image'] [batch_size, ring_size, 3, image_size, image_size]. For a training forward pass, additional corresponding data are necessery such as 'landmarks' and 'masks'. For a testing pass, the images suffice. :param training: Whether the forward pass is for training or testing. """ codedict = {} original_batch_size = batch['image'].shape[0] images = batch['image'] if 'mica_images' in batch.keys(): mica_images = batch['mica_images'] else: mica_images = None if len(images.shape) == 5: K = images.shape[1] elif len(images.shape) == 4: K = 1 else: raise RuntimeError("Invalid image batch dimensions.") # [B, K, 3, size, size] ==> [BxK, 3, size, size] images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = mica_images.view(-1, mica_images.shape[-3], mica_images.shape[-2], mica_images.shape[-1]) if 'landmark' in batch.keys(): lmk = batch['landmark'] lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) if 'landmark_mediapipe' in batch.keys(): lmk_mp = batch['landmark_mediapipe'] lmk_mp = lmk_mp.view(-1, lmk_mp.shape[-2], lmk_mp.shape[-1]) else: lmk_mp = None if 'mask' in batch.keys(): masks = batch['mask'] masks = masks.view(-1, images.shape[-2], images.shape[-1]) # valence / arousal - not necessary unless we want to use VA for supervision (not done in EMOCA) if 'va' in batch: va = batch['va'] va = va.view(-1, va.shape[-1]) else: va = None # 7 basic expression - not necessary unless we want to use expression for supervision (not done in EMOCA or DECA) if 'expr7' in batch: expr7 = batch['expr7'] expr7 = expr7.view(-1, expr7.shape[-1]) else: expr7 = None # affectnet basic expression - not necessary unless we want to use expression for supervision (not done in EMOCA or DECA) if 'affectnetexp' in batch: affectnetexp = batch['affectnetexp'] affectnetexp = affectnetexp.view(-1, affectnetexp.shape[-1]) else: affectnetexp = None # expression weights if supervising by expression is used (to balance the classification loss) - not done in EMOCA or DECA if 'expression_weight' in batch: exprw = batch['expression_weight'] exprw = exprw.view(-1, exprw.shape[-1]) else: exprw = None # 1) COARSE STAGE # forward pass of the coarse encoder # shapecode, texcode, expcode, posecode, cam, lightcode = self._encode_flame(images) code, original_code = self._encode_flame(images, mica_image=mica_images) shapecode, texcode, expcode, posecode, cam, lightcode = self._unwrap_list(code) if original_code is not None: original_code = self._unwrap_list_to_dict(original_code) if training: # If training, we employ the disentanglement strategy if self.mode == DecaMode.COARSE: if self.deca.config.shape_constrain_type == 'same': ## Enforce that all identity shape codes within ring are the same. The batch is duplicated ## and the duplicated part's shape codes are shuffled. 
# reshape shapecode => [B, K, n_shape] # shapecode_idK = shapecode.view(self.batch_size, self.deca.K, -1) shapecode_idK = shapecode.view(original_batch_size, K, -1) # get mean id shapecode_mean = torch.mean(shapecode_idK, dim=[1]) # shapecode_new = shapecode_mean[:, None, :].repeat(1, self.deca.K, 1) shapecode_new = shapecode_mean[:, None, :].repeat(1, K, 1) shapecode = shapecode_new.view(-1, self.deca._get_num_shape_params()) # do the same for the original code dict shapecode_orig = original_code['shape'] shapecode_orig_idK = shapecode_orig.view(original_batch_size, K, -1) shapecode_orig_mean = torch.mean(shapecode_orig_idK, dim=[1]) shapecode_orig_new = shapecode_orig_mean[:, None, :].repeat(1, K, 1) original_code['shape'] = shapecode_orig_new.view(-1, self.deca._get_num_shape_params()) elif self.deca.config.shape_constrain_type == 'exchange': ## Shuffle identities' shape codes within the ring (they should correspond to the same identity) ''' Make sure s0, s1 correspond to the same shape: penalizing the difference ||s0 - s1|| only encourages s0 and s1 to be close in L2 space, but does not really ensure that the shapes themselves will be close. ''' # new_order = np.array([np.random.permutation(self.deca.config.train_K) + i * self.deca.config.train_K for i in range(self.deca.config.batch_size_train)]) # new_order = np.array([np.random.permutation(self.deca.config.train_K) + i * self.deca.config.train_K for i in range(original_batch_size)]) new_order = np.array([np.random.permutation(K) + i * K for i in range(original_batch_size)]) new_order = new_order.flatten() shapecode_new = shapecode[new_order] ## append new shape code data shapecode = torch.cat([shapecode, shapecode_new], dim=0) texcode = torch.cat([texcode, texcode], dim=0) expcode = torch.cat([expcode, expcode], dim=0) posecode = torch.cat([posecode, posecode], dim=0) cam = torch.cat([cam, cam], dim=0) lightcode = torch.cat([lightcode, lightcode], dim=0) ## append gt images = torch.cat([images, images], dim=0) # images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = torch.cat([mica_images, mica_images], dim=0) lmk = torch.cat([lmk, lmk], dim=0) # lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) if lmk_mp is not None: lmk_mp = torch.cat([lmk_mp, lmk_mp], dim=0) masks = torch.cat([masks, masks], dim=0) if va is not None: va = torch.cat([va, va], dim=0) if expr7 is not None: expr7 = torch.cat([expr7, expr7], dim=0) # do the same for the original code dict shapecode_orig = original_code['shape'] shapecode_orig_new = shapecode_orig[new_order] original_code['shape'] = torch.cat([shapecode_orig, shapecode_orig_new], dim=0) original_code['tex'] = torch.cat([original_code['tex'], original_code['tex']], dim=0) original_code['exp'] = torch.cat([original_code['exp'], original_code['exp']], dim=0) original_code['pose'] = torch.cat([original_code['pose'], original_code['pose']], dim=0) original_code['cam'] = torch.cat([original_code['cam'], original_code['cam']], dim=0) original_code['light'] = torch.cat([original_code['light'], original_code['light']], dim=0) elif self.deca.config.shape_constrain_type == 'shuffle_expression': assert original_code is not None ## DEPRECATED, NOT USED IN EMOCA OR DECA new_order = np.random.permutation(K*original_batch_size) old_order = np.arange(K*original_batch_size) while (new_order == old_order).any(): # ugly hacky way of assuring that every element is permuted new_order = np.random.permutation(K * original_batch_size) codedict['new_order'] = new_order # exchange expression
expcode_new = expcode[new_order] expcode = torch.cat([expcode, expcode_new], dim=0) # exchange jaw pose (but not global pose) global_pose = posecode[:, :3] jaw_pose = posecode[:, 3:] jaw_pose_new = jaw_pose[new_order] jaw_pose = torch.cat([jaw_pose, jaw_pose_new], dim=0) global_pose = torch.cat([global_pose, global_pose], dim=0) posecode = torch.cat([global_pose, jaw_pose], dim=1) ## duplicate the rest shapecode = torch.cat([shapecode, shapecode], dim=0) texcode = torch.cat([texcode, texcode], dim=0) cam = torch.cat([cam, cam], dim=0) lightcode = torch.cat([lightcode, lightcode], dim=0) ## duplicate gt if any images = torch.cat([images, images], dim=0) # images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = torch.cat([mica_images, mica_images], dim=0) print(f"TRAINING: {training}") if lmk is not None: lmk = torch.cat([lmk, lmk], dim=0) # lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) if lmk_mp is not None: lmk_mp = torch.cat([lmk_mp, lmk_mp], dim=0) masks = torch.cat([masks, masks], dim=0) ref_images_identity_idxs = np.concatenate([old_order, old_order]) ref_images_expression_idxs = np.concatenate([old_order, new_order]) codedict["ref_images_identity_idxs"] = ref_images_identity_idxs codedict["ref_images_expression_idxs"] = ref_images_expression_idxs if va is not None: va = torch.cat([va, va[new_order]], dim=0) if expr7 is not None: expr7 = torch.cat([expr7, expr7[new_order]], dim=0) # do the same for the original code dict original_code['shape'] = torch.cat([original_code['shape'], original_code['shape']], dim=0) original_code['tex'] = torch.cat([original_code['tex'], original_code['tex']], dim=0) original_code['exp'] = torch.cat([original_code['exp'], original_code['exp'][new_order]], dim=0) original_global_pose = original_code['pose'][:, :3] original_jaw_pose = original_code['pose'][:, 3:] original_jaw_pose = torch.cat([original_jaw_pose, original_jaw_pose[new_order]], dim=0) original_global_pose = torch.cat([original_global_pose, original_global_pose], dim=0) original_code['pose'] = torch.cat([original_global_pose, original_jaw_pose], dim=1) original_code['cam'] = torch.cat([original_code['cam'], original_code['cam']], dim=0) original_code['light'] = torch.cat([original_code['light'], original_code['light']], dim=0) elif self.deca.config.shape_constrain_type == 'shuffle_shape': ## The shape codes are shuffled without duplication new_order = np.random.permutation(K*original_batch_size) old_order = np.arange(K*original_batch_size) while (new_order == old_order).any(): # ugly hacky way of assuring that every element is permuted new_order = np.random.permutation(K * original_batch_size) codedict['new_order'] = new_order shapecode_new = shapecode[new_order] ## append new shape code data shapecode = torch.cat([shapecode, shapecode_new], dim=0) texcode = torch.cat([texcode, texcode], dim=0) expcode = torch.cat([expcode, expcode], dim=0) posecode = torch.cat([posecode, posecode], dim=0) cam = torch.cat([cam, cam], dim=0) lightcode = torch.cat([lightcode, lightcode], dim=0) ## append gt images = torch.cat([images, images], dim=0) # images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = torch.cat([mica_images, mica_images], dim=0) if lmk is not None: lmk = torch.cat([lmk, lmk], dim=0) # lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) masks = torch.cat([masks, masks], dim=0) ref_images_identity_idxs = np.concatenate([old_order, new_order]) 
ref_images_expression_idxs = np.concatenate([old_order, old_order]) codedict["ref_images_identity_idxs"] = ref_images_identity_idxs codedict["ref_images_expression_idxs"] = ref_images_expression_idxs if va is not None: va = torch.cat([va, va], dim=0) if expr7 is not None: expr7 = torch.cat([expr7, expr7], dim=0) # do the same for the original code dict shapecode_orig = original_code['shape'] shapecode_orig_new = shapecode_orig[new_order] original_code['shape'] = torch.cat([shapecode_orig, shapecode_orig_new], dim=0) original_code['tex'] = torch.cat([original_code['tex'], original_code['tex']], dim=0) original_code['exp'] = torch.cat([original_code['exp'], original_code['exp']], dim=0) original_code['pose'] = torch.cat([original_code['pose'], original_code['pose']], dim=0) original_code['cam'] = torch.cat([original_code['cam'], original_code['cam']], dim=0) original_code['light'] = torch.cat([original_code['light'], original_code['light']], dim=0) original_code['ref_images_identity_idxs'] = ref_images_identity_idxs original_code['ref_images_expression_idxs'] = ref_images_expression_idxs elif 'expression_constrain_type' in self.deca.config.keys() and \ self.deca.config.expression_constrain_type == 'same': ## NOT USED IN EMOCA OR DECA, deprecated # reshape shapecode => [B, K, n_shape] # shapecode_idK = shapecode.view(self.batch_size, self.deca.K, -1) expcode_idK = expcode.view(original_batch_size, K, -1) # get mean id expcode_mean = torch.mean(expcode_idK, dim=[1]) # shapecode_new = shapecode_mean[:, None, :].repeat(1, self.deca.K, 1) expcode_new = expcode_mean[:, None, :].repeat(1, K, 1) expcode = expcode_new.view(-1, self.deca._get_num_shape_params()) # do the same thing for the original code dict expcode_idK = original_code['exp'].view(original_batch_size, K, -1) expcode_mean = torch.mean(expcode_idK, dim=[1]) expcode_new = expcode_mean[:, None, :].repeat(1, K, 1) original_code['exp'] = expcode_new.view(-1, self.deca._get_num_shape_params()) elif 'expression_constrain_type' in self.deca.config.keys() and \ self.deca.config.expression_constrain_type == 'exchange': ## NOT USED IN EMOCA OR DECA, deprecated expcode, posecode, shapecode, lightcode, texcode, images, cam, lmk, \ masks, va, expr7, affectnetexp, _, _, exprw, lmk_mp, mica_images = \ self._expression_ring_exchange(original_batch_size, K, expcode, posecode, shapecode, lightcode, texcode, images, cam, lmk, masks, va, expr7, affectnetexp, None, None, exprw, lmk_mp, mica_images) # (self, original_batch_size, K, # expcode, posecode, shapecode, lightcode, texcode, # images, cam, lmk, masks, va, expr7, affectnetexp, # detailcode=None, detailemocode=None, exprw=None): # 2) DETAIL STAGE if self.mode == DecaMode.DETAIL: all_detailcode = self.deca.E_detail(images) # identity-based detail code detailcode = all_detailcode[:, :self.deca.n_detail] # detail emotion code is deprecated and will be empty detailemocode = all_detailcode[:, self.deca.n_detail:(self.deca.n_detail + self.deca.n_detail_emo)] if training: # If training, we employ the disentanglement strategy if self.deca.config.detail_constrain_type == 'exchange': # Identity within the same ring should be the same, so the images should have the same code. # This can be enforced by shuffling: the batch is duplicated and the duplicated part's codes are shuffled. ''' Make sure s0, s1 correspond to the same shape: penalizing the difference ||s0 - s1|| only encourages s0 and s1 to be close in L2 space, but does not really ensure that the shapes themselves will be close. ''' # this creates a per-ring random permutation.
The detail exchange happens ONLY between the same # identities (within the ring) but not outside (no cross-identity detail exchange) new_order = np.array( # [np.random.permutation(self.deca.config.train_K) + i * self.deca.config.train_K for i in range(original_batch_size)]) [np.random.permutation(K) + i * K for i in range(original_batch_size)]) new_order = new_order.flatten() detailcode_new = detailcode[new_order] detailcode = torch.cat([detailcode, detailcode_new], dim=0) detailemocode = torch.cat([detailemocode, detailemocode], dim=0) ## append new shape code data shapecode = torch.cat([shapecode, shapecode], dim=0) texcode = torch.cat([texcode, texcode], dim=0) expcode = torch.cat([expcode, expcode], dim=0) posecode = torch.cat([posecode, posecode], dim=0) cam = torch.cat([cam, cam], dim=0) lightcode = torch.cat([lightcode, lightcode], dim=0) ## append gt images = torch.cat([images, images], dim=0) # images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = torch.cat([mica_images, mica_images], dim=0) lmk = torch.cat([lmk, lmk], dim=0) # lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) masks = torch.cat([masks, masks], dim=0) if va is not None: va = torch.cat([va, va], dim=0) if expr7 is not None: expr7 = torch.cat([expr7, expr7], dim=0) elif self.deca.config.detail_constrain_type == 'shuffle_expression': ## Deprecated and not used in EMOCA or DECA new_order = np.random.permutation(K*original_batch_size) old_order = np.arange(K*original_batch_size) while (new_order == old_order).any(): # ugly hacky way of assuring that every element is permuted new_order = np.random.permutation(K * original_batch_size) codedict['new_order'] = new_order # exchange expression expcode_new = expcode[new_order] expcode = torch.cat([expcode, expcode_new], dim=0) # exchange emotion code, but not (identity-based) detailcode detailemocode_new = detailemocode[new_order] detailemocode = torch.cat([detailemocode, detailemocode_new], dim=0) detailcode = torch.cat([detailcode, detailcode], dim=0) # exchange jaw pose (but not global pose) global_pose = posecode[:, :3] jaw_pose = posecode[:, 3:] jaw_pose_new = jaw_pose[new_order] jaw_pose = torch.cat([jaw_pose, jaw_pose_new], dim=0) global_pose = torch.cat([global_pose, global_pose], dim=0) posecode = torch.cat([global_pose, jaw_pose], dim=1) ## duplicate the rest shapecode = torch.cat([shapecode, shapecode], dim=0) texcode = torch.cat([texcode, texcode], dim=0) cam = torch.cat([cam, cam], dim=0) lightcode = torch.cat([lightcode, lightcode], dim=0) ## duplicate gt if any images = torch.cat([images, images], dim=0) # images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = torch.cat([mica_images, mica_images], dim=0) print(f"TRAINING: {training}") if lmk is not None: lmk = torch.cat([lmk, lmk], dim=0) # lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) masks = torch.cat([masks, masks], dim=0) ref_images_identity_idxs = np.concatenate([old_order, old_order]) ref_images_expression_idxs = np.concatenate([old_order, new_order]) codedict["ref_images_identity_idxs"] = ref_images_identity_idxs codedict["ref_images_expression_idxs"] = ref_images_expression_idxs if va is not None: va = torch.cat([va, va[new_order]], dim=0) if expr7 is not None: expr7 = torch.cat([expr7, expr7[new_order]], dim=0) elif self.deca.config.detail_constrain_type == 'shuffle_shape': ## Shuffles teh shape code without duplicating the batch new_order = 
np.random.permutation(K*original_batch_size) old_order = np.arange(K*original_batch_size) while (new_order == old_order).any(): # ugly hacky way of assuring that every element is permuted new_order = np.random.permutation(K * original_batch_size) codedict['new_order'] = new_order shapecode_new = shapecode[new_order] ## append new shape code data shapecode = torch.cat([shapecode, shapecode_new], dim=0) # exchange (identity-based) detailcode, but not emotion code detailcode_new = detailcode[new_order] detailcode = torch.cat([detailcode, detailcode_new], dim=0) detailemocode = torch.cat([detailemocode, detailemocode], dim=0) texcode = torch.cat([texcode, texcode], dim=0) expcode = torch.cat([expcode, expcode], dim=0) posecode = torch.cat([posecode, posecode], dim=0) cam = torch.cat([cam, cam], dim=0) lightcode = torch.cat([lightcode, lightcode], dim=0) ## append gt images = torch.cat([images, images], dim=0) # images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = torch.cat([mica_images, mica_images], dim=0) if lmk is not None: lmk = torch.cat([lmk, lmk], dim=0) # lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) masks = torch.cat([masks, masks], dim=0) ref_images_identity_idxs = np.concatenate([old_order, new_order]) ref_images_expression_idxs = np.concatenate([old_order, old_order]) codedict["ref_images_identity_idxs"] = ref_images_identity_idxs codedict["ref_images_expression_idxs"] = ref_images_expression_idxs if va is not None: va = torch.cat([va, va], dim=0) if expr7 is not None: expr7 = torch.cat([expr7, expr7], dim=0) elif 'expression_constrain_type' in self.deca.config.keys() and \ self.deca.config.expression_constrain_type == 'exchange': expcode, posecode, shapecode, lightcode, texcode, images, cam, lmk, masks, va, expr7, affectnetexp, detailcode, detailemocode, exprw, lmk_mp, mica_images = \ self._expression_ring_exchange(original_batch_size, K, expcode, posecode, shapecode, lightcode, texcode, images, cam, lmk, masks, va, expr7, affectnetexp, detailcode, detailemocode, exprw, lmk_mp, mica_images) codedict['shapecode'] = shapecode codedict['texcode'] = texcode codedict['expcode'] = expcode codedict['posecode'] = posecode codedict['cam'] = cam codedict['lightcode'] = lightcode if self.mode == DecaMode.DETAIL: codedict['detailcode'] = detailcode codedict['detailemocode'] = detailemocode codedict['images'] = images if mica_images is not None: codedict['mica_images'] = mica_images if 'mask' in batch.keys(): codedict['masks'] = masks if 'landmark' in batch.keys(): codedict['lmk'] = lmk if lmk_mp is not None: codedict['lmk_mp'] = lmk_mp if 'va' in batch.keys(): codedict['va'] = va if 'expr7' in batch.keys(): codedict['expr7'] = expr7 if 'affectnetexp' in batch.keys(): codedict['affectnetexp'] = affectnetexp if 'expression_weight' in batch.keys(): codedict['expression_weight'] = exprw if original_code is not None: codedict['original_code'] = original_code return codedict def _create_conditioning_lists(self, codedict, condition_list): detail_conditioning_list = [] if 'globalpose' in condition_list: detail_conditioning_list += [codedict["posecode"][:, :3]] if 'jawpose' in condition_list: detail_conditioning_list += [codedict["posecode"][:, 3:]] if 'identity' in condition_list: detail_conditioning_list += [codedict["shapecode"]] if 'expression' in condition_list: detail_conditioning_list += [codedict["expcode"]] if isinstance(self.deca.D_detail, Generator): # the detail codes might be excluded from conditioning based on the 
# Generator architecture (for instance for AdaIn Generator)
            if 'detail' in condition_list:
                detail_conditioning_list += [codedict["detailcode"]]
            if 'detailemo' in condition_list:
                detail_conditioning_list += [codedict["detailemocode"]]

        return detail_conditioning_list

    def decode(self, codedict, training=True, render=True, **kwargs) -> dict:
        """
        Forward decoding pass of the model. Takes the latent code predicted by the encoding stage and reconstructs and renders the shape.
        :param codedict: Batch dict of the predicted latent codes
        :param training: Whether the forward pass is for training or testing.
        """
        shapecode = codedict['shapecode']
        expcode = codedict['expcode']
        posecode = codedict['posecode']
        texcode = codedict['texcode']
        cam = codedict['cam']
        lightcode = codedict['lightcode']
        images = codedict['images']
        if 'masks' in codedict.keys():
            masks = codedict['masks']
        else:
            masks = None

        effective_batch_size = images.shape[0]  # the current batch size after all training augmentation modifications

        # 1) Reconstruct the face mesh
        # FLAME - world space
        if not isinstance(self.deca.flame, FLAME_mediapipe):
            verts, landmarks2d, landmarks3d = self.deca.flame(shape_params=shapecode, expression_params=expcode,
                                                              pose_params=posecode)
            landmarks2d_mediapipe = None
        else:
            verts, landmarks2d, landmarks3d, landmarks2d_mediapipe = self.deca.flame(shapecode, expcode, posecode)

        # world to camera
        trans_verts = util.batch_orth_proj(verts, cam)
        predicted_landmarks = util.batch_orth_proj(landmarks2d, cam)[:, :, :2]
        # camera to image space
        trans_verts[:, :, 1:] = -trans_verts[:, :, 1:]
        predicted_landmarks[:, :, 1:] = - predicted_landmarks[:, :, 1:]

        if landmarks2d_mediapipe is not None:
            predicted_landmarks_mediapipe = util.batch_orth_proj(landmarks2d_mediapipe, cam)[:, :, :2]
            predicted_landmarks_mediapipe[:, :, 1:] = - predicted_landmarks_mediapipe[:, :, 1:]

        if self.uses_texture():
            albedo = self.deca.flametex(texcode)
        else:
            # if not using texture, default to gray
            albedo = torch.ones([effective_batch_size, 3, self.deca.config.uv_size, self.deca.config.uv_size],
                                device=images.device) * 0.5

        # 2) Render the coarse image
        if render:
            ops = self.deca.render(verts, trans_verts, albedo, lightcode)
            # mask
            mask_face_eye = F.grid_sample(self.deca.uv_face_eye_mask.expand(effective_batch_size, -1, -1, -1),
                                          ops['grid'].detach(), align_corners=False)
            # images
            predicted_images = ops['images']
            # predicted_images = ops['images'] * mask_face_eye * ops['alpha_images']
            # predicted_images_no_mask = ops['images'] #* mask_face_eye * ops['alpha_images']

            segmentation_type = None
            if isinstance(self.deca.config.useSeg, bool):
                if self.deca.config.useSeg:
                    segmentation_type = 'gt'
                else:
                    segmentation_type = 'rend'
            elif isinstance(self.deca.config.useSeg, str):
                segmentation_type = self.deca.config.useSeg
            else:
                raise RuntimeError(f"Invalid 'useSeg' type: '{type(self.deca.config.useSeg)}'")

            if segmentation_type not in ["gt", "rend", "intersection", "union"]:
                raise ValueError(f"Invalid segmentation type for masking '{segmentation_type}'")

            if masks is None:  # if no mask is provided, the only mask available is the rendered one
                segmentation_type = 'rend'
            elif masks.shape[-1] != predicted_images.shape[-1] or masks.shape[-2] != predicted_images.shape[-2]:
                # resize masks if need be (this is only done if the configuration was changed at some point after training)
                dims = masks.ndim == 3
                if dims:
                    masks = masks[:, None, :, :]
                masks = F.interpolate(masks, size=predicted_images.shape[-2:], mode='bilinear')
                if dims:
                    masks = masks[:, 0, ...]

            # resize images if need be (this is only done if the configuration was changed at some point after training)
            if images.shape[-1] != predicted_images.shape[-1] or images.shape[-2] != predicted_images.shape[-2]:
                ## special case only for inference time if the rendering image sizes have been changed
                images_resized = F.interpolate(images, size=predicted_images.shape[-2:], mode='bilinear')
            else:
                images_resized = images

            # what type of segmentation we use
            if segmentation_type == "gt":  # GT stands for an external segmentation predicted by face parsing or similar
                masks = masks[:, None, :, :]
            elif segmentation_type == "rend":  # mask rendered as a silhouette of the face mesh
                masks = mask_face_eye * ops['alpha_images']
            elif segmentation_type == "intersection":  # intersection of the two above
                masks = masks[:, None, :, :] * mask_face_eye * ops['alpha_images']
            elif segmentation_type == "union":  # union of the first two options
                masks = torch.max(masks[:, None, :, :], mask_face_eye * ops['alpha_images'])
            else:
                raise RuntimeError(f"Invalid segmentation type for masking '{segmentation_type}'")

            if self.deca.config.background_from_input in [True, "input"]:
                if images.shape[-1] != predicted_images.shape[-1] or images.shape[-2] != predicted_images.shape[-2]:
                    ## special case only for inference time if the rendering image sizes have been changed
                    predicted_images = (1. - masks) * images_resized + masks * predicted_images
                else:
                    predicted_images = (1. - masks) * images + masks * predicted_images
            elif self.deca.config.background_from_input in [False, "black"]:
                predicted_images = masks * predicted_images
            elif self.deca.config.background_from_input in ["none"]:
                predicted_images = predicted_images
            else:
                raise ValueError(f"Invalid type of background modification {self.deca.config.background_from_input}")

        # 3) Render the detail image
        if self.mode == DecaMode.DETAIL:
            detailcode = codedict['detailcode']
            detailemocode = codedict['detailemocode']

            # a) Create the detail conditioning lists
            detail_conditioning_list = self._create_conditioning_lists(codedict, self.detail_conditioning)
            detailemo_conditioning_list = self._create_conditioning_lists(codedict, self.detailemo_conditioning)
            final_detail_conditioning_list = detail_conditioning_list + detailemo_conditioning_list

            # b) Pass the detail code and the conditions through the detail generator to get displacement UV map
            if isinstance(self.deca.D_detail, Generator):
                uv_z = self.deca.D_detail(torch.cat(final_detail_conditioning_list, dim=1))
            elif isinstance(self.deca.D_detail, GeneratorAdaIn):
                uv_z = self.deca.D_detail(z=torch.cat([detailcode, detailemocode], dim=1),
                                          cond=torch.cat(final_detail_conditioning_list, dim=1))
            else:
                raise ValueError(f"This class of generator is not supported: '{self.deca.D_detail.__class__.__name__}'")

            # if there is a displacement mask, apply it (DEPRECATED and not USED in DECA or EMOCA)
            if hasattr(self.deca, 'displacement_mask') and self.deca.displacement_mask is not None:
                if 'apply_displacement_masks' in self.deca.config.keys() and self.deca.config.apply_displacement_masks:
                    uv_z = uv_z * self.deca.displacement_mask
            # uv_z = self.deca.D_detail(torch.cat([posecode[:, 3:], expcode, detailcode], dim=1))

            # render detail
            if render:
                detach_from_coarse_geometry = not self.deca.config.train_coarse
                uv_detail_normals, uv_coarse_vertices = self.deca.displacement2normal(uv_z, verts, ops['normals'],
                                                                                      detach=detach_from_coarse_geometry)
                uv_shading = self.deca.render.add_SHlight(uv_detail_normals, lightcode.detach())
                uv_texture = albedo.detach() * uv_shading  # batch size X
image_rows X image_cols X 2 # you can query the grid for UV values of the face mesh at pixel locations grid = ops['grid'] if detach_from_coarse_geometry: # if the grid is detached, the gradient of the positions of UV-values in image space won't flow back to the geometry grid = grid.detach() predicted_detailed_image = F.grid_sample(uv_texture, grid, align_corners=False) if self.deca.config.background_from_input in [True, "input"]: if images.shape[-1] != predicted_images.shape[-1] or images.shape[-2] != predicted_images.shape[-2]: ## special case only for inference time if the rendering image sizes have been changed # images_resized = F.interpolate(images, size=predicted_images.shape[-2:], mode='bilinear') ## before bugfix # predicted_images = (1. - masks) * images_resized + masks * predicted_images ## after bugfix predicted_detailed_image = (1. - masks) * images_resized + masks * predicted_detailed_image else: predicted_detailed_image = (1. - masks) * images + masks * predicted_detailed_image elif self.deca.config.background_from_input in [False, "black"]: predicted_detailed_image = masks * predicted_detailed_image elif self.deca.config.background_from_input in ["none"]: predicted_detailed_image = predicted_detailed_image else: raise ValueError(f"Invalid type of background modification {self.deca.config.background_from_input}") # --- extract texture uv_pverts = self.deca.render.world2uv(trans_verts).detach() uv_gt = F.grid_sample(torch.cat([images_resized, masks], dim=1), uv_pverts.permute(0, 2, 3, 1)[:, :, :, :2], mode='bilinear') uv_texture_gt = uv_gt[:, :3, :, :].detach() uv_mask_gt = uv_gt[:, 3:, :, :].detach() # self-occlusion normals = util.vertex_normals(trans_verts, self.deca.render.faces.expand(effective_batch_size, -1, -1)) uv_pnorm = self.deca.render.world2uv(normals) uv_mask = (uv_pnorm[:, -1, :, :] < -0.05).float().detach() uv_mask = uv_mask[:, None, :, :] ## combine masks uv_vis_mask = uv_mask_gt * uv_mask * self.deca.uv_face_eye_mask else: uv_detail_normals = None predicted_detailed_image = None ## 4) (Optional) NEURAL RENDERING - not used in neither DECA nor EMOCA # If neural rendering is enabled, the differentiable rendered synthetic images are translated using an image translation net (such as StarGan) predicted_translated_image = None predicted_detailed_translated_image = None translated_uv_texture = None if render: if self.deca._has_neural_rendering(): predicted_translated_image = self.deca.image_translator( { "input_image" : predicted_images, "ref_image" : images, "target_domain" : torch.tensor([0]*predicted_images.shape[0], dtype=torch.int64, device=predicted_images.device) } ) if self.mode == DecaMode.DETAIL: predicted_detailed_translated_image = self.deca.image_translator( { "input_image" : predicted_detailed_image, "ref_image" : images, "target_domain" : torch.tensor([0]*predicted_detailed_image.shape[0], dtype=torch.int64, device=predicted_detailed_image.device) } ) translated_uv = F.grid_sample(torch.cat([predicted_detailed_translated_image, masks], dim=1), uv_pverts.permute(0, 2, 3, 1)[:, :, :, :2], mode='bilinear') translated_uv_texture = translated_uv[:, :3, :, :].detach() else: predicted_detailed_translated_image = None translated_uv_texture = None # no need in coarse mode # translated_uv = F.grid_sample(torch.cat([predicted_translated_image, masks], dim=1), uv_pverts.permute(0, 2, 3, 1)[:, :, :, :2], # mode='bilinear') # translated_uv_texture = translated_uv_gt[:, :3, :, :].detach() if self.emotion_mlp is not None: codedict = self.emotion_mlp(codedict, 
"emo_mlp_") # populate the value dict for metric computation/visualization if render: codedict['predicted_images'] = predicted_images codedict['predicted_detailed_image'] = predicted_detailed_image codedict['predicted_translated_image'] = predicted_translated_image codedict['ops'] = ops codedict['normals'] = ops['normals'] codedict['mask_face_eye'] = mask_face_eye codedict['verts'] = verts codedict['albedo'] = albedo codedict['landmarks2d'] = landmarks2d codedict['landmarks3d'] = landmarks3d codedict['predicted_landmarks'] = predicted_landmarks if landmarks2d_mediapipe is not None: codedict['predicted_landmarks_mediapipe'] = predicted_landmarks_mediapipe codedict['trans_verts'] = trans_verts codedict['masks'] = masks if self.mode == DecaMode.DETAIL: if render: codedict['predicted_detailed_translated_image'] = predicted_detailed_translated_image codedict['translated_uv_texture'] = translated_uv_texture codedict['uv_texture_gt'] = uv_texture_gt codedict['uv_texture'] = uv_texture codedict['uv_detail_normals'] = uv_detail_normals codedict['uv_shading'] = uv_shading codedict['uv_vis_mask'] = uv_vis_mask codedict['uv_mask'] = uv_mask codedict['uv_z'] = uv_z codedict['displacement_map'] = uv_z + self.deca.fixed_uv_dis[None, None, :, :] return codedict def _compute_emotion_loss(self, images, predicted_images, loss_dict, metric_dict, prefix, va=None, expr7=None, with_grad=True, batch_size=None, ring_size=None): def loss_or_metric(name, loss, is_loss): if not is_loss: metric_dict[name] = loss else: loss_dict[name] = loss # if self.deca.config.use_emonet_loss: if with_grad: d = loss_dict emo_feat_loss_1, emo_feat_loss_2, valence_loss, arousal_loss, expression_loss, au_loss = \ self.emonet_loss.compute_loss(images, predicted_images, batch_size=batch_size, ring_size=ring_size) else: d = metric_dict with torch.no_grad(): emo_feat_loss_1, emo_feat_loss_2, valence_loss, arousal_loss, expression_loss, au_loss = \ self.emonet_loss.compute_loss(images, predicted_images, batch_size=batch_size, ring_size=ring_size) # EmoNet self-consistency loss terms if emo_feat_loss_1 is not None: loss_or_metric(prefix + '_emonet_feat_1_L1', emo_feat_loss_1 * self.deca.config.emonet_weight, self.deca.config.use_emonet_feat_1 and self.deca.config.use_emonet_loss) loss_or_metric(prefix + '_emonet_feat_2_L1', emo_feat_loss_2 * self.deca.config.emonet_weight, self.deca.config.use_emonet_feat_2 and self.deca.config.use_emonet_loss) loss_or_metric(prefix + '_emonet_valence_L1', valence_loss * self.deca.config.emonet_weight, self.deca.config.use_emonet_valence and self.deca.config.use_emonet_loss) loss_or_metric(prefix + '_emonet_arousal_L1', arousal_loss * self.deca.config.emonet_weight, self.deca.config.use_emonet_arousal and self.deca.config.use_emonet_loss) # loss_or_metric(prefix + 'emonet_expression_KL', expression_loss * self.deca.config.emonet_weight) # KL seems to be causing NaN's loss_or_metric(prefix + '_emonet_expression_L1',expression_loss * self.deca.config.emonet_weight, self.deca.config.use_emonet_expression and self.deca.config.use_emonet_loss) loss_or_metric(prefix + '_emonet_combined', ((emo_feat_loss_1 if emo_feat_loss_1 is not None else 0) + emo_feat_loss_2 + valence_loss + arousal_loss + expression_loss) * self.deca.config.emonet_weight, self.deca.config.use_emonet_combined and self.deca.config.use_emonet_loss) # Log also the VA metric_dict[prefix + "_valence_input"] = self.emonet_loss.input_emotion['valence'].mean().detach() metric_dict[prefix + "_valence_output"] = 
self.emonet_loss.output_emotion['valence'].mean().detach() metric_dict[prefix + "_arousal_input"] = self.emonet_loss.input_emotion['arousal'].mean().detach() metric_dict[prefix + "_arousal_output"] = self.emonet_loss.output_emotion['arousal'].mean().detach() input_ex = self.emonet_loss.input_emotion['expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'].detach().cpu().numpy() input_ex = np.argmax(input_ex, axis=1).mean() output_ex = self.emonet_loss.output_emotion['expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'].detach().cpu().numpy() output_ex = np.argmax(output_ex, axis=1).mean() metric_dict[prefix + "_expression_input"] = torch.tensor(input_ex, device=self.device) metric_dict[prefix + "_expression_output"] = torch.tensor(output_ex, device=self.device) # # GT emotion loss terms # if self.deca.config.use_gt_emotion_loss: # d = loss_dict # else: # d = metric_dict # TODO: uncomment this after you handle the case when certain entries are NaN (GT missing, not a bug) # if va is not None: # d[prefix + 'emo_sup_val_L1'] = F.l1_loss(self.emonet_loss.output_emotion['valence'], va[:, 0]) \ # * self.deca.config.gt_emotion_reg # d[prefix + 'emo_sup_ar_L1'] = F.l1_loss(self.emonet_loss.output_emotion['arousal'], va[:, 1]) \ # * self.deca.config.gt_emotion_reg # # metric_dict[prefix + "_valence_gt"] = va[:, 0].mean().detach() # metric_dict[prefix + "_arousal_gt"] = va[:, 1].mean().detach() # # if expr7 is not None: # affectnet_gt = [expr7_to_affect_net(int(expr7[i])).value for i in range(len(expr7))] # affectnet_gt = torch.tensor(np.array(affectnet_gt), device=self.device, dtype=torch.long) # d[prefix + '_emo_sup_expr_CE'] = F.cross_entropy(self.emonet_loss.output_emotion['expression'], affectnet_gt) * self.deca.config.gt_emotion_reg # metric_dict[prefix + "_expr_gt"] = affectnet_gt.mean().detach() def _compute_au_loss(self, images, predicted_images, loss_dict, metric_dict, prefix, au=None, with_grad=True): def loss_or_metric(name, loss, is_loss): if not is_loss: metric_dict[name] = loss else: loss_dict[name] = loss # if self.deca.config.use_emonet_loss: if with_grad: d = loss_dict au_feat_loss_1, au_feat_loss_2, _, _, _, au_loss = \ self.au_loss.compute_loss(images, predicted_images) else: d = metric_dict with torch.no_grad(): au_feat_loss_1, au_feat_loss_2, _, _, _, au_loss = \ self.au_loss.compute_loss(images, predicted_images) # EmoNet self-consistency loss terms if au_feat_loss_1 is not None: loss_or_metric(prefix + '_au_feat_1_L1', au_feat_loss_1 * self.deca.config.au_loss.au_weight, self.deca.config.au_loss.use_feat_1 and self.deca.config.au_loss.use_as_loss) loss_or_metric(prefix + '_au_feat_2_L1', au_feat_loss_2 * self.deca.config.au_loss.au_weight, self.deca.config.au_loss.use_feat_2 and self.deca.config.au_loss.use_as_loss) loss_or_metric(prefix + '_au_loss', au_loss * self.deca.config.au_loss.au_weight, self.deca.config.au_loss.use_aus and self.deca.config.au_loss.use_as_loss) # loss_or_metric(prefix + '_au_losses_L1', arousal_loss * self.deca.config.au_loss.au_weight, # self.deca.config.au_loss.use_emonet_arousal and self.deca.config.au_loss.use_as_loss) # loss_or_metric(prefix + 'emonet_expression_KL', expression_loss * self.deca.config.au_loss.au_weight) # KL seems to be causing NaN's # # Log also the VA # metric_dict[prefix + "_valence_input"] = self.emonet_loss.input_emotion['valence'].mean().detach() # metric_dict[prefix + "_valence_output"] = 
self.emonet_loss.output_emotion['valence'].mean().detach() # metric_dict[prefix + "_arousal_input"] = self.emonet_loss.input_emotion['arousal'].mean().detach() # metric_dict[prefix + "_arousal_output"] = self.emonet_loss.output_emotion['arousal'].mean().detach() # input_ex = self.emonet_loss.input_emotion['expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'].detach().cpu().numpy() # input_ex = np.argmax(input_ex, axis=1).mean() # output_ex = self.emonet_loss.output_emotion['expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'].detach().cpu().numpy() # output_ex = np.argmax(output_ex, axis=1).mean() # metric_dict[prefix + "_expression_input"] = torch.tensor(input_ex, device=self.device) # metric_dict[prefix + "_expression_output"] = torch.tensor(output_ex, device=self.device) # # GT emotion loss terms # if self.deca.config.use_gt_emotion_loss: # d = loss_dict # else: # d = metric_dict def _cut_mouth_vectorized(self, images, landmarks, convert_grayscale=True): # mouth_window_margin = 12 mouth_window_margin = 1 # not temporal mouth_crop_height = 96 mouth_crop_width = 96 mouth_landmark_start_idx = 48 mouth_landmark_stop_idx = 68 B, T = images.shape[:2] landmarks = landmarks.to(torch.float32) with torch.no_grad(): image_size = images.shape[-1] / 2 landmarks = landmarks * image_size + image_size # #1) smooth the landmarks with temporal convolution # landmarks are of shape (T, 68, 2) # reshape to (T, 136) landmarks_t = landmarks.reshape(*landmarks.shape[:2], -1) # make temporal dimension last landmarks_t = landmarks_t.permute(0, 2, 1) # change chape to (N, 136, T) # landmarks_t = landmarks_t.unsqueeze(0) # smooth with temporal convolution temporal_filter = torch.ones(mouth_window_margin, device=images.device) / mouth_window_margin # pad the the landmarks landmarks_t_padded = F.pad(landmarks_t, (mouth_window_margin // 2, mouth_window_margin // 2), mode='replicate') # convolve each channel separately with the temporal filter num_channels = landmarks_t.shape[1] if temporal_filter.numel() > 1: smooth_landmarks_t = F.conv1d(landmarks_t_padded, temporal_filter.unsqueeze(0).unsqueeze(0).expand(num_channels,1,temporal_filter.numel()), groups=num_channels, padding='valid' ) smooth_landmarks_t = smooth_landmarks_t[..., 0:landmarks_t.shape[-1]] else: smooth_landmarks_t = landmarks_t # reshape back to the original shape smooth_landmarks_t = smooth_landmarks_t.permute(0, 2, 1).view(landmarks.shape) smooth_landmarks_t = smooth_landmarks_t + landmarks.mean(dim=2, keepdims=True) - smooth_landmarks_t.mean(dim=2, keepdims=True) # #2) get the mouth landmarks mouth_landmarks_t = smooth_landmarks_t[..., mouth_landmark_start_idx:mouth_landmark_stop_idx, :] # #3) get the mean of the mouth landmarks mouth_landmarks_mean_t = mouth_landmarks_t.mean(dim=-2, keepdims=True) # #4) get the center of the mouth center_x_t = mouth_landmarks_mean_t[..., 0] center_y_t = mouth_landmarks_mean_t[..., 1] # #5) use grid_sample to crop the mouth in every image # create the grid height = mouth_crop_height//2 width = mouth_crop_width//2 torch.arange(0, mouth_crop_width, device=images.device) grid = torch.stack(torch.meshgrid(torch.linspace(-height, height, mouth_crop_height).to(images.device) / (images.shape[-2] /2), torch.linspace(-width, width, mouth_crop_width).to(images.device) / (images.shape[-1] /2) ), dim=-1) grid = grid[..., [1, 0]] grid = grid.unsqueeze(0).unsqueeze(0).repeat(*images.shape[:2], 1, 1, 1) center_x_t -= images.shape[-1] / 2 
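            # NOTE (illustration, not part of the original module): F.grid_sample expects
            # sampling locations in normalized coordinates, where x, y in [-1, 1] span the
            # full width and height. The surrounding lines therefore map the mouth-center
            # pixel coordinates to that range, roughly
            #     x_norm = (x_pix - W / 2) / (W / 2),   y_norm = (y_pix - H / 2) / (H / 2)
            # e.g. with W = 224, x_pix = 112 maps to 0.0 (center) and x_pix = 0 to -1.0
            # (left edge); adding these offsets to the base grid recenters the fixed-size
            # crop window on the mouth in every frame.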
            center_y_t -= images.shape[-2] / 2
            center_x_t /= images.shape[-1] / 2
            center_y_t /= images.shape[-2] / 2

            grid = grid + torch.cat([center_x_t, center_y_t], dim=-1).unsqueeze(-2).unsqueeze(-2)

        images = images.view(B * T, *images.shape[2:])
        grid = grid.view(B * T, *grid.shape[2:])

        if convert_grayscale:
            images = F_v.rgb_to_grayscale(images)

        image_crops = F.grid_sample(
            images,
            grid,
            align_corners=True,
            padding_mode='zeros',
            mode='bicubic'
        )
        image_crops = image_crops.view(B, T, *image_crops.shape[1:])

        if convert_grayscale:
            image_crops = image_crops  # .squeeze(1)

        # (commented-out matplotlib debug visualizations of the crops removed here)

        return image_crops

    def _compute_lipread_loss(self, images, predicted_images, landmarks, predicted_landmarks, loss_dict, metric_dict,
                              prefix, with_grad=True):

        def loss_or_metric(name, loss, is_loss):
            if not is_loss:
                metric_dict[name] = loss
            else:
                loss_dict[name] = loss

        # shape of images is: (B, R, C, H, W)
        # convert to (B * R, 1, H, W, C)
        images = images.unsqueeze(1)
        predicted_images = predicted_images.unsqueeze(1)
        landmarks = landmarks.unsqueeze(1)
        predicted_landmarks = predicted_landmarks.unsqueeze(1)

        # cut out the mouth region
        images_mouth = self._cut_mouth_vectorized(images, landmarks)
        predicted_images_mouth = self._cut_mouth_vectorized(predicted_images, predicted_landmarks)

        # make sure that the lip reading net interprets things with depth=1
        # if self.deca.config.use_emonet_loss:
        if with_grad:
            d = loss_dict
            loss = self.lipread_loss.compute_loss(images_mouth, predicted_images_mouth)
        else:
            d = metric_dict
            with torch.no_grad():
                loss = self.lipread_loss.compute_loss(images_mouth, predicted_images_mouth)

        d[prefix + '_lipread'] = loss * self.deca.config.lipread_loss.weight

    def _metric_or_loss(self, loss_dict, metric_dict, is_loss):
        if is_loss:
            d = loss_dict
        else:
            d = metric_dict
        return d

    def _compute_id_loss(self, codedict, batch, training, testing, losses, batch_size, ring_size):
        # if self.deca.config.idw > 1e-3:
        if self.deca.id_loss is not None:
            images = codedict["images"]
            ops = codedict["ops"]
            mask_face_eye = codedict["mask_face_eye"]
            shading_images = self.deca.render.add_SHlight(ops['normal_images'], codedict["lightcode"].detach())
            albedo_images = F.grid_sample(codedict["albedo"].detach(), ops['grid'], align_corners=False)

            # TODO: get to the bottom of this weird overlay thing - why is it there?
# answer: This renders the face and takes background from the image overlay = albedo_images * shading_images * mask_face_eye + images * (1 - mask_face_eye) if self.global_step >= self.deca.id_loss_start_step: if 'id_metric' in self.deca.config.keys() and 'barlow_twins' in self.deca.config.id_metric: assert ring_size == 1 or ring_size == 2 effective_bs = images.shape[0] # losses['identity'] = self.deca.id_loss(overlay, images, batch_size=batch_size, # ring_size=ring_size) * self.deca.config.idw if "ref_images_identity_idxs" in codedict.keys(): # in case there was shuffling, this ensures that the proper images are used for identity loss images_ = images[codedict["ref_images_identity_idxs"]] else: images_ = images losses['identity'] = self.deca.id_loss(overlay, images_, batch_size=effective_bs, ring_size=1) * self.deca.config.idw if 'id_contrastive' in self.deca.config.keys() and bool(self.deca.config.id_contrastive): if ring_size == 2: assert effective_bs % 2 == 0 assert self.deca.id_loss.trainable has_been_shuffled = 'new_order' in codedict.keys() idxs_a = torch.arange(0, images.shape[0], 2) # indices of first images within the ring idxs_b = torch.arange(1, images.shape[0], 2) # indices of second images within the ring # WARNING - this assumes the ring is identity-based if self.deca.config.id_contrastive in [True, "real", "both"]: # we are taking this from the original batch dict because we do not care about the # shuffled, duplicated samples (they don't have to be doubled) images_0 = batch["image"][:, 0, ...] images_1 = batch["image"][:, 1, ...] losses['identity_contrastive_real'] = self.deca.id_loss( images_0, # first images within the ring images_1, # second images within the ring batch_size=images_0.shape[0], ring_size=1) * self.deca.config.idw * 2 if self.deca.config.id_contrastive in [True, "synth", "both"]: if self.deca.config.shape_constrain_type in ['exchange', 'same']: # we can take all when identity has been exchange within rings overlay_0 = overlay[idxs_a] overlay_1 = overlay[idxs_b] else: #if the batch was double otherwise (global shuffling) we only take the first half # if batch_size * ring_size < effective_bs: overlay_0 = overlay[0:batch_size * ring_size:2] overlay_1 = overlay[1:batch_size * ring_size:2] losses['identity_contrastive_synthetic'] = self.deca.id_loss( overlay_0, # first images within the ring overlay_1, # second images within the ring batch_size=overlay_0.shape[0], ring_size=1) * self.deca.config.idw if has_been_shuffled: new_order = codedict['new_order'] # TODO: compare the idxs to these: # codedict["ref_images_identity_idxs"] if self.deca.config.shape_constrain_type == 'shuffle_expression': idxs_a_synth = np.arange(new_order.shape[0]) # first half of the batch idxs_b_synth = np.arange(new_order.shape[0], 2 * new_order.shape[0]) # second half of the batch elif self.deca.config.shape_constrain_type == 'shuffle_shape': idxs_a_synth = new_order # shuffled first half of the batch idxs_b_synth = np.arange(new_order.shape[0], 2 * new_order.shape[0]) # second half of the batch else: raise NotImplementedError("Unexpected shape consistency value ") # if this doesn't go through, something went wrong with the shuffling indexations assert codedict["shapecode"][idxs_a_synth].allclose(codedict["shapecode"][idxs_b_synth]) losses['identity_contrastive_synthetic_shuffled'] = self.deca.id_loss( overlay[idxs_a_synth], # synthetic images of identities with reconstructed expressions overlay[idxs_b_synth], # synthetic images of identities with shuffled expressions 
                        batch_size=idxs_a_synth.size, ring_size=1) * self.deca.config.idw

                    losses['identity_contrastive_synthetic2real_shuffled'] = self.deca.id_loss(
                        images[idxs_a_synth],  # real input images of identities with reconstructed expressions
                        overlay[idxs_b_synth],  # synthetic images of identities with shuffled expressions
                        batch_size=idxs_a_synth.size,
                        ring_size=1) * self.deca.config.idw
            elif ring_size > 2:
                raise NotImplementedError("Contrastive loss does not support ring sizes > 2.")
        return losses

    def _compute_emonet_loss_wrapper(self, codedict, batch, training, testing, losses, metrics, prefix, image_key,
                                     with_grad, batch_size, ring_size):

        if self.emonet_loss is not None:
            if 'va' in codedict:
                va = codedict['va']
                va = va.view(-1, va.shape[-1])
            else:
                va = None

            if 'expr7' in codedict:
                expr7 = codedict['expr7']
                expr7 = expr7.view(-1, expr7.shape[-1])
            else:
                expr7 = None

            # with torch.no_grad():
            # TODO: if expression shuffled, this needs to be changed, the input images no longer correspond
            images = codedict["images"]
            predicted_images = codedict[image_key]
            effective_bs = images.shape[0]

            if "ref_images_expression_idxs" in codedict.keys():
                # in case there was shuffling, this ensures that the proper images are used for emotion loss
                images_ = images[codedict["ref_images_expression_idxs"]]
            else:
                images_ = images
            effective_bs = images.shape[0]

            self._compute_emotion_loss(images_, predicted_images, losses, metrics, f"{prefix}",
                                       va, expr7, with_grad=with_grad,
                                       batch_size=effective_bs, ring_size=1)

            codedict[f"{prefix}_valence_input"] = self.emonet_loss.input_emotion['valence']
            codedict[f"{prefix}_arousal_input"] = self.emonet_loss.input_emotion['arousal']
            codedict[f"{prefix}_expression_input"] = self.emonet_loss.input_emotion[
                'expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification']
            codedict[f"{prefix}_valence_output"] = self.emonet_loss.output_emotion['valence']
            codedict[f"{prefix}_arousal_output"] = self.emonet_loss.output_emotion['arousal']
            codedict[f"{prefix}_expression_output"] = self.emonet_loss.output_emotion[
                'expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification']

            if 'emo_contrastive' in self.deca.config.keys() and self.deca.config.emo_contrastive:
                assert ring_size == 2 or ring_size == 1
                assert self.emonet_loss.trainable or (
                        hasattr(self.emonet_loss, 'clone_is_trainable') and self.emonet_loss.clone_is_trainable)

                has_been_shuffled = 'new_order' in codedict.keys()
                # if self.deca.config.shape_constrain_type == 'shuffle_expression' and has_been_shuffled:
                #     new_order = codedict['new_order']
                #
                if self.deca.config.emo_contrastive in [True, "real", "both"]:
                    if ring_size == 2:
                        assert effective_bs % 2 == 0
                        if not isinstance(self.deca, ExpDECA):
                            raise NotImplementedError("Cross-ring emotion contrast means the ring has to be "
                                                      "expression based, not identity based. This is not guaranteed "
                                                      "for vanilla EMOCA (or its datasets).")
                        # we are taking this from the original batch dict because we do not care about the
                        # shuffled, duplicated samples (they don't have to be doubled)
                        images_0 = batch["image"][:, 0, ...]
                        images_1 = batch["image"][:, 1, ...]
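                        # NOTE (illustration, not part of the original module): with
                        # ring_size == 2 the dataloader is assumed to deliver batch["image"]
                        # of shape (B, K=2, C, H, W), so [:, 0] and [:, 1] pick the two
                        # expressions of the same ring; the contrastive emotion term below
                        # compares exactly these pairs.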
self._compute_emotion_loss(images_0, # real images of first expressions in the ring images_1, # real images of second expressions in the ring losses, metrics, f"{prefix}_contrastive_real", va, expr7, with_grad=self.deca.config.use_emonet_loss, batch_size=images_0.shape[0], ring_size=1) else: print("[WARNING] Cannot compute real contrastive emotion loss because there is no ring!") if self.deca.config.emo_contrastive in [True, "synth", "both"]: if ring_size == 2: assert effective_bs % 2 == 0 idxs_a = torch.arange(0, images.shape[0], 2) # indices of first expressions within a ring idxs_b = torch.arange(1, images.shape[0], 2) # indices of second expressions within a ring if 'expression_constrain_type' in self.deca.config.keys() and \ self.deca.config.expression_constrain_type in ['exchange', 'same']: # we can take all when identity has been exchange within rings predicted_images_0 = predicted_images[idxs_a] predicted_images_1 = predicted_images[idxs_b] raise RuntimeError("This should work but it was never tested or intended. Make sure this works.") else: # if the batch was double otherwise (global shuffling) we only take the first half # if batch_size * ring_size < effective_bs: predicted_images_0 = predicted_images[0:batch_size * ring_size:2] predicted_images_1 = predicted_images[1:batch_size * ring_size:2] if not isinstance(self.deca, ExpDECA): raise NotImplementedError("Cross-ring emotion contrast means the ring has to be " "expression based, not identity based. This is not guaranteed " "for vanilla EMOCA.") self._compute_emotion_loss(predicted_images_0, # rec images of first expressions in the ring predicted_images_1, # rec images of second expressions in the ring losses, metrics, f"{prefix}_contrastive_synth", va, expr7, with_grad=self.deca.config.use_emonet_loss, batch_size=predicted_images_1.shape[0], ring_size=1) else: print("[WARNING] Cannot compute synthetic contrastive emotion loss because there is no ring!") if has_been_shuffled: new_order = codedict['new_order'] if self.deca.config.shape_constrain_type == 'shuffle_expression': # this gets tricky, in this case the images are not duplicates -> we need all, but the second # half's order is shuffled, so we need to be careful here idxs_a_synth = new_order # shuffled first half of the batch idxs_b_synth = np.arange(new_order.shape[0], 2 * new_order.shape[0]) # second half of the batch elif self.deca.config.shape_constrain_type == 'shuffle_shape': idxs_a_synth = np.arange(new_order.shape[0]) # first half of the batch idxs_b_synth = np.arange(new_order.shape[0], 2 * new_order.shape[0]) # second half of the batch # if this doesn't go through, something went wrong with the shuffling indexations assert codedict["expcode"][idxs_a_synth].allclose(codedict["expcode"][idxs_b_synth]) # the expressions at corresponding index positions of idxs_a_synth and idxs_b_synth should match now self._compute_emotion_loss(predicted_images[idxs_a_synth], # synthetic images of reconstructed expressions and corresponding identities predicted_images[idxs_b_synth], # synthetic images of reconstructed expressions and shuffled identities losses, metrics, f"{prefix}_contrastive_synth_shuffled", va, expr7, with_grad=self.deca.config.use_emonet_loss and not self.deca._has_neural_rendering(), batch_size=idxs_a_synth.size, ring_size=1) self._compute_emotion_loss(images[idxs_a_synth], # synthetic images of reconstructed expressions and corresponding identities predicted_images[idxs_b_synth], # synthetic images of reconstructed expressions and shuffled identities losses, 
metrics, f"{prefix}_contrastive_synth2real_shuffled", va, expr7, with_grad=self.deca.config.use_emonet_loss and not self.deca._has_neural_rendering(), batch_size=idxs_a_synth.size, ring_size=1) if va is not None: codedict[f"{prefix}_valence_gt"] = va[:, 0] codedict[f"{prefix}_arousal_gt"] = va[:, 1] if expr7 is not None: codedict[f"{prefix}_expression_gt"] = expr7 if self.deca._has_neural_rendering(): assert 'emo_contrastive' not in self.deca.config.keys() or self.deca.config.emo_contrastive is False # TODO possible to make this more GPU efficient by not recomputing emotion for input image self._compute_emotion_loss(images, predicted_translated_image, losses, metrics, f"{prefix}_translated", va, expr7, with_grad=self.deca.config.use_emonet_loss and self.deca._has_neural_rendering(), batch_size=bs, ring_size=1) # codedict[f"{prefix}_valence_input"] = self.emonet_loss.input_emotion['valence'] # codedict[f"{prefix}_arousal_input"] = self.emonet_loss.input_emotion['arousal'] # codedict[f"{prefix}_expression_input"] = self.emonet_loss.input_emotion['expression'] codedict[f"{prefix}_translated_valence_output"] = self.emonet_loss.output_emotion['valence'] codedict[f"{prefix}_translated_arousal_output"] = self.emonet_loss.output_emotion['arousal'] codedict[f"{prefix}_translated_expression_output"] = self.emonet_loss.output_emotion[ 'expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'] return losses, metrics, codedict def _compute_loss(self, codedict, batch, training=True, testing=False): #### ----------------------- Losses losses = {} metrics = {} predicted_landmarks = codedict["predicted_landmarks"] predicted_landmarks_mediapipe = codedict.get("predicted_landmarks_mediapipe", None) if "lmk" in codedict.keys(): lmk = codedict["lmk"] else: lmk = None if "lmk_mp" in codedict.keys(): lmk_mp = codedict["lmk_mp"] else: lmk_mp = None if "masks" in codedict.keys(): masks = codedict["masks"] else: masks = None batch_size = codedict["predicted_images"].shape[0] use_geom_losses = 'use_geometric_losses_expression_exchange' in self.deca.config.keys() and \ self.deca.config.use_geometric_losses_expression_exchange if training and ('expression_constrain_type' in self.deca.config.keys() \ and ('expression_constrain_type' in self.deca.config.keys() and self.deca.config.expression_constrain_type == 'exchange') or ( 'shape_constrain_type' in self.deca.config.keys() and self.deca.config.shape_constrain_type in ['shuffle_expression', 'shuffle_shape'])) \ and (self.deca.mode == DecaMode.COARSE or self.deca.config.train_coarse) \ and (not use_geom_losses): if batch_size % 2 != 0: raise RuntimeError("The batch size should be even because it should have " f"got doubled in expression ring exchange. 
Instead it was odd: {batch_size}") # THIS IS DONE BECAUSE LANDMARK AND PHOTOMETRIC LOSSES MAKE NO SENSE FOR EXPRESSION EXCHANGE geom_losses_idxs = batch_size // 2 else: geom_losses_idxs = batch_size predicted_images = codedict["predicted_images"] images = codedict["images"] lightcode = codedict["lightcode"] albedo = codedict["albedo"] mask_face_eye = codedict["mask_face_eye"] shapecode = codedict["shapecode"] expcode = codedict["expcode"] texcode = codedict["texcode"] ops = codedict["ops"] if self.mode == DecaMode.DETAIL: uv_texture = codedict["uv_texture"] uv_texture_gt = codedict["uv_texture_gt"] # this determines the configured batch size that is currently used (training, validation or testing) # the reason why this is important is because of potential multi-gpu training and loss functions (such as Barlow Twins) # that might need the full size of the batch (not just the chunk of the current GPU). if training: bs = self.learning_params.batch_size_train rs = self.learning_params.train_K else: if not testing: bs = self.learning_params.batch_size_val rs = self.learning_params.val_K else: bs = self.learning_params.batch_size_test rs = self.learning_params.test_K ## COARSE loss only if self.mode == DecaMode.COARSE or (self.mode == DecaMode.DETAIL and self.deca.config.train_coarse): # landmark losses (only useful if coarse model is being trained # if training or lmk is not None: if lmk is not None: # if self.deca.config.use_landmarks: # d = losses # else: # d = metrics d = self._metric_or_loss(losses, metrics, self.deca.config.use_landmarks) if self.deca.config.useWlmk: d['landmark'] = \ lossfunc.weighted_landmark_loss(predicted_landmarks[:geom_losses_idxs, ...], lmk[:geom_losses_idxs, ...]) * self.deca.config.lmk_weight else: d['landmark'] = \ lossfunc.landmark_loss(predicted_landmarks[:geom_losses_idxs, ...], lmk[:geom_losses_idxs, ...]) * self.deca.config.lmk_weight d = self._metric_or_loss(losses, metrics, 'use_eye_distance' not in self.deca.config.keys() or self.deca.config.use_eye_distance) # losses['eye_distance'] = lossfunc.eyed_loss(predicted_landmarks, lmk) * self.deca.config.lmk_weight * 2 d['eye_distance'] = lossfunc.eyed_loss(predicted_landmarks[:geom_losses_idxs, ...], lmk[:geom_losses_idxs, ...]) * self.deca.config.eyed d = self._metric_or_loss(losses, metrics, 'use_lip_distance' not in self.deca.config.keys() or self.deca.config.use_lip_distance) d['lip_distance'] = lossfunc.lipd_loss(predicted_landmarks[:geom_losses_idxs, ...], lmk[:geom_losses_idxs, ...]) * self.deca.config.lipd d = self._metric_or_loss(losses, metrics, 'use_mouth_corner_distance' in self.deca.config.keys() and self.deca.config.use_mouth_corner_distance) d['mouth_corner_distance'] = lossfunc.mouth_corner_loss(predicted_landmarks[:geom_losses_idxs, ...], lmk[:geom_losses_idxs, ...]) * self.deca.config.lipd if predicted_landmarks_mediapipe is not None and lmk_mp is not None: use_mediapipe_landmarks = self.deca.config.get('use_mediapipe_landmarks', False) d = self._metric_or_loss(losses, metrics, use_mediapipe_landmarks) d['landmark_mediapipe'] =lossfunc_mp.landmark_loss(predicted_landmarks_mediapipe[:geom_losses_idxs, ...], lmk_mp[:geom_losses_idxs, ...]) * self.deca.config.lmk_weight_mp d = self._metric_or_loss(losses, metrics, self.deca.config.get('use_eye_distance_mediapipe', False) ) d['eye_distance_mediapipe'] = lossfunc_mp.eyed_loss(predicted_landmarks_mediapipe[:geom_losses_idxs, ...], lmk_mp[:geom_losses_idxs, ...]) * self.deca.config.eyed_mp d = self._metric_or_loss(losses, metrics, 
self.deca.config.get('use_lip_distance_mediapipe', False) ) d['lip_distance_mediapipe'] = lossfunc_mp.lipd_loss(predicted_landmarks_mediapipe[:geom_losses_idxs, ...], lmk_mp[:geom_losses_idxs, ...]) * self.deca.config.lipd_mp d = self._metric_or_loss(losses, metrics, self.deca.config.get('use_mouth_corner_distance_mediapipe', False)) d['mouth_corner_distance_mediapipe'] = lossfunc_mp.mouth_corner_loss(predicted_landmarks_mediapipe[:geom_losses_idxs, ...], lmk_mp[:geom_losses_idxs, ...]) * self.deca.config.lipd_mp #TODO: fix this on the next iteration lipd_loss # d['lip_distance'] = lossfunc.lipd_loss(predicted_landmarks, lmk) * self.deca.config.lipd # photometric loss # if training or masks is not None: if masks is not None: # if self.deca.config.use_photometric: # d = losses # else: # d = metrics # d['photometric_texture'] = (masks * (predicted_images - images).abs()).mean() * self.deca.config.photow photometric = masks[:geom_losses_idxs, ...] * ((predicted_images[:geom_losses_idxs, ...] - images[:geom_losses_idxs, ...]).abs()) if 'photometric_normalization' not in self.deca.config.keys() or self.deca.config.photometric_normalization == 'mean': photometric = photometric.mean() elif self.deca.config.photometric_normalization == 'rel_mask_value': photometric = photometric * masks[:geom_losses_idxs, ...].mean(dim=tuple(range(1,masks.ndim)), keepdim=True) photometric = photometric.mean() elif self.deca.config.photometric_normalization == 'neg_rel_mask_value': mu = 1. - masks[:geom_losses_idxs, ...].mean(dim=tuple(range(1,masks.ndim)), keepdim=True) photometric = photometric * mu photometric = photometric.mean() elif self.deca.config.photometric_normalization == 'inv_rel_mask_value': mu = 1./ masks[:geom_losses_idxs, ...].mean(dim=tuple(range(1,masks.ndim)), keepdim=True) photometric = photometric * mu photometric = photometric.mean() elif self.deca.config.photometric_normalization == 'abs_mask_value': photometric = photometric * masks[:geom_losses_idxs, ...].sum(dim=tuple(range(1,masks.ndim)), keepdim=True) photometric = photometric.mean() else: raise ValueError(f"Invalid photometric loss normalization: '{self.deca.config.photometric_normalization}'") self._metric_or_loss(losses, metrics, self.deca.config.use_photometric)['photometric_texture'] = \ photometric * self.deca.config.photow if self.deca.vgg_loss is not None: vggl, _ = self.deca.vgg_loss( masks[:geom_losses_idxs, ...] * images[:geom_losses_idxs, ...], # masked input image masks[:geom_losses_idxs, ...] * predicted_images[:geom_losses_idxs, ...], # masked output image ) self._metric_or_loss(losses, metrics, self.deca.config.use_vgg)['vgg'] = vggl * self.deca.config.vggw if self.deca._has_neural_rendering(): predicted_translated_image = codedict["predicted_translated_image"] photometric_translated = (masks[:geom_losses_idxs, ...] * ( predicted_translated_image[:geom_losses_idxs, ...] - images[:geom_losses_idxs, ...]).abs()).mean() * self.deca.config.photow if self.deca.config.use_photometric: losses['photometric_translated_texture'] = photometric_translated else: metrics['photometric_translated_texture'] = photometric_translated if self.deca.vgg_loss is not None: vggl, _ = self.deca.vgg_loss( masks[:geom_losses_idxs, ...] * images[:geom_losses_idxs, ...], # masked input image masks[:geom_losses_idxs, ...] 
* predicted_translated_image[:geom_losses_idxs, ...], # masked output image ) self._metric_or_loss(losses, metrics, self.deca.config.use_vgg)['vgg_translated'] = vggl * self.deca.config.vggw else: raise ValueError("Is this line ever reached?") losses = self._compute_id_loss(codedict, batch, training, testing, losses, batch_size=bs, ring_size=rs) losses['shape_reg'] = (torch.sum(shapecode ** 2) / 2) * self.deca.config.shape_reg losses['expression_reg'] = (torch.sum(expcode ** 2) / 2) * self.deca.config.exp_reg losses['tex_reg'] = (torch.sum(texcode ** 2) / 2) * self.deca.config.tex_reg losses['light_reg'] = ((torch.mean(lightcode, dim=2)[:, :, None] - lightcode) ** 2).mean() * self.deca.config.light_reg if 'original_code' in codedict.keys(): # original jaw pose regularization if self.deca.config.get('exp_deca_jaw_pose', False) and \ 'deca_jaw_reg' in self.deca.config.keys() and self.deca.config.deca_jaw_reg > 0: jaw_pose_orig = codedict['original_code']['pose'][:, 3:] jaw_pose = codedict['posecode'][..., 3:] deca_jaw_pose_reg = (torch.sum((jaw_pose - jaw_pose_orig) ** 2) / 2) * self.deca.config.deca_jaw_reg losses['deca_jaw_pose_reg'] = deca_jaw_pose_reg if self.deca.config.get('exp_deca_global_pose', False) and \ 'deca_global_reg' in self.deca.config.keys() and self.deca.config.deca_global_reg > 0: global_pose_orig = codedict['original_code']['pose'][:, :3] global_pose = codedict['posecode'][..., :3] global_pose_reg = (torch.sum((global_pose - global_pose_orig) ** 2) / 2) * self.deca.config.deca_global_reg losses['deca_global_pose_reg'] = global_pose_reg # original expression regularization if 'deca_expression_reg' in self.deca.config.keys() and self.deca.config.deca_expression_reg > 0: expression_orig = codedict['original_code']['exp'] expression = codedict['expcode'] deca_expression_reg = (torch.sum((expression - expression_orig) ** 2) / 2) * self.deca.config.deca_expression_reg losses['deca_expression_reg'] = deca_expression_reg losses, metrics, codedict = self._compute_emonet_loss_wrapper(codedict, batch, training, testing, losses, metrics, prefix="coarse", image_key="predicted_images", with_grad=self.deca.config.use_emonet_loss and not self.deca._has_neural_rendering(), batch_size=bs, ring_size=rs) if self.deca._has_neural_rendering(): losses, metrics, codedict = self._compute_emonet_loss_wrapper(codedict, batch, training, testing, losses, metrics, prefix="coarse_translated", image_key="predicted_translated_image", with_grad=self.deca.config.use_emonet_loss and self.deca._has_neural_rendering(), batch_size=bs, ring_size=rs ) if self.au_loss is not None: # with torch.no_grad(): self._compute_au_loss(images, predicted_images, losses, metrics, "coarse", au=None, with_grad=self.deca.config.au_loss.use_as_loss and not self.deca._has_neural_rendering()) if self.deca._has_neural_rendering(): self._compute_au_loss(images, predicted_translated_image, losses, metrics, "coarse", au=None, with_grad=self.deca.config.au_loss.use_as_loss and self.deca._has_neural_rendering()) if self.lipread_loss is not None: # with torch.no_grad(): self._compute_lipread_loss(images, predicted_images, lmk, predicted_landmarks, losses, metrics, "coarse", with_grad=self.deca.config.lipread_loss.use_as_loss and not self.deca._has_neural_rendering()) if self.deca._has_neural_rendering(): self._compute_lipread_loss(images, predicted_translated_image, lmk, predicted_landmarks, losses, metrics, "coarse", with_grad=self.deca.config.lipread_loss.use_as_loss and self.deca._has_neural_rendering()) ## DETAIL loss only if 
self.mode == DecaMode.DETAIL: predicted_detailed_image = codedict["predicted_detailed_image"] uv_z = codedict["uv_z"] # UV displacement map uv_shading = codedict["uv_shading"] uv_vis_mask = codedict["uv_vis_mask"] # uv_mask of what is visible photometric_detailed = (masks[:geom_losses_idxs, ...] * ( predicted_detailed_image[:geom_losses_idxs, ...] - images[:geom_losses_idxs, ...]).abs()).mean() * self.deca.config.photow if self.deca.config.use_detailed_photo: losses['photometric_detailed_texture'] = photometric_detailed else: metrics['photometric_detailed_texture'] = photometric_detailed if self.deca.vgg_loss is not None: vggl, _ = self.deca.vgg_loss( masks[:geom_losses_idxs, ...] * images[:geom_losses_idxs, ...], # masked input image masks[:geom_losses_idxs, ...] * predicted_detailed_image[:geom_losses_idxs, ...], # masked output image ) self._metric_or_loss(losses, metrics, self.deca.config.use_vgg)['vgg_detailed'] = vggl * self.deca.config.vggw if self.deca._has_neural_rendering(): predicted_detailed_translated_image = codedict["predicted_detailed_translated_image"] photometric_detailed_translated = (masks[:geom_losses_idxs, ...] * ( predicted_detailed_translated_image[:geom_losses_idxs, ...] - images[:geom_losses_idxs, ...]).abs()).mean() * self.deca.config.photow if self.deca.config.use_detailed_photo: losses['photometric_translated_detailed_texture'] = photometric_detailed_translated else: metrics['photometric_translated_detailed_texture'] = photometric_detailed_translated if self.deca.vgg_loss is not None: vggl, _ = self.deca.vgg_loss( masks[:geom_losses_idxs, ...] * images[:geom_losses_idxs, ...], # masked input image masks[:geom_losses_idxs, ...] * predicted_detailed_translated_image[:geom_losses_idxs, ...], # masked output image ) self._metric_or_loss(losses, metrics, self.deca.config.use_vgg)[ 'vgg_detailed_translated'] = vggl * self.deca.config.vggw losses, metrics, codedict = self._compute_emonet_loss_wrapper(codedict, batch, training, testing, losses, metrics, prefix="detail", image_key = "predicted_detailed_image", with_grad=self.deca.config.use_emonet_loss and not self.deca._has_neural_rendering(), batch_size=bs, ring_size=rs) if self.deca._has_neural_rendering(): losses, metrics, codedict = self._compute_emonet_loss_wrapper(codedict, batch, training, testing, losses, metrics, prefix="detail_translated", image_key="predicted_detailed_translated_image", with_grad=self.deca.config.use_emonet_loss and self.deca._has_neural_rendering(), batch_size=bs, ring_size=rs) # if self.emonet_loss is not None: # self._compute_emotion_loss(images, predicted_detailed_image, losses, metrics, "detail", # with_grad=self.deca.config.use_emonet_loss and not self.deca._has_neural_rendering(), # batch_size=bs, ring_size=rs) # codedict["detail_valence_input"] = self.emonet_loss.input_emotion['valence'] # codedict["detail_arousal_input"] = self.emonet_loss.input_emotion['arousal'] # codedict["detail_expression_input"] = self.emonet_loss.input_emotion['expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'] # codedict["detail_valence_output"] = self.emonet_loss.output_emotion['valence'] # codedict["detail_arousal_output"] = self.emonet_loss.output_emotion['arousal'] # codedict["detail_expression_output"] = self.emonet_loss.output_emotion['expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'] # # if va is not None: # codedict["detail_valence_gt"] = va[:,0] # codedict["detail_arousal_gt"] = va[:,1] # if expr7 is not 
None: # codedict["detail_expression_gt"] = expr7 # if self.deca._has_neural_rendering(): # #TODO possible to make this more GPU efficient by not recomputing emotion for input image # self._compute_emotion_loss(images, predicted_detailed_translated_image, # losses, metrics, "detail_translated", # va, expr7, # with_grad= self.deca.config.use_emonet_loss and self.deca._has_neural_rendering(), # batch_size=bs, ring_size=rs) # # # codedict["coarse_valence_input"] = self.emonet_loss.input_emotion['valence'] # # codedict["coarse_arousal_input"] = self.emonet_loss.input_emotion['arousal'] # # codedict["coarse_expression_input"] = self.emonet_loss.input_emotion['expression'] # codedict["detail_translated_valence_output"] = self.emonet_loss.output_emotion['valence'] # codedict["detail_translated_arousal_output"] = self.emonet_loss.output_emotion['arousal'] # codedict["detail_translated_expression_output"] = self.emonet_loss.output_emotion['expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'] if self.au_loss is not None: self._compute_au_loss(images, predicted_images, losses, metrics, "detail", au=None, with_grad=self.deca.config.au_loss.use_as_loss and not self.deca._has_neural_rendering()) if self.deca._has_neural_rendering(): self._compute_au_loss(images, predicted_detailed_translated_image, losses, metrics, "detail", au=None, with_grad=self.deca.config.au_loss.use_as_loss and self.deca._has_neural_rendering()) for pi in range(3): # self.deca.face_attr_mask.shape[0]): if self.deca.config.sfsw[pi] != 0: # if pi==0: new_size = 256 # else: # new_size = 128 # if self.deca.config.uv_size != 256: # new_size = 128 uv_texture_patch = F.interpolate( uv_texture[:geom_losses_idxs, :, self.deca.face_attr_mask[pi][2]:self.deca.face_attr_mask[pi][3], self.deca.face_attr_mask[pi][0]:self.deca.face_attr_mask[pi][1]], [new_size, new_size], mode='bilinear') uv_texture_gt_patch = F.interpolate( uv_texture_gt[:geom_losses_idxs, :, self.deca.face_attr_mask[pi][2]:self.deca.face_attr_mask[pi][3], self.deca.face_attr_mask[pi][0]:self.deca.face_attr_mask[pi][1]], [new_size, new_size], mode='bilinear') uv_vis_mask_patch = F.interpolate( uv_vis_mask[:geom_losses_idxs, :, self.deca.face_attr_mask[pi][2]:self.deca.face_attr_mask[pi][3], self.deca.face_attr_mask[pi][0]:self.deca.face_attr_mask[pi][1]], [new_size, new_size], mode='bilinear') detail_l1 = (uv_texture_patch * uv_vis_mask_patch - uv_texture_gt_patch * uv_vis_mask_patch).abs().mean() * \ self.deca.config.sfsw[pi] if self.deca.config.use_detail_l1 and not self.deca._has_neural_rendering(): losses['detail_l1_{}'.format(pi)] = detail_l1 else: metrics['detail_l1_{}'.format(pi)] = detail_l1 if self.deca.config.use_detail_mrf and not self.deca._has_neural_rendering(): mrf = self.deca.perceptual_loss(uv_texture_patch * uv_vis_mask_patch, uv_texture_gt_patch * uv_vis_mask_patch) * \ self.deca.config.sfsw[pi] * self.deca.config.mrfwr losses['detail_mrf_{}'.format(pi)] = mrf else: with torch.no_grad(): mrf = self.deca.perceptual_loss(uv_texture_patch * uv_vis_mask_patch, uv_texture_gt_patch * uv_vis_mask_patch) * \ self.deca.config.sfsw[pi] * self.deca.config.mrfwr metrics['detail_mrf_{}'.format(pi)] = mrf if self.deca._has_neural_rendering(): # raise NotImplementedError("Gotta implement the texture extraction first.") translated_uv_texture = codedict["translated_uv_texture"] translated_uv_texture_patch = F.interpolate( translated_uv_texture[:geom_losses_idxs, :, self.deca.face_attr_mask[pi][2]:self.deca.face_attr_mask[pi][3], 
self.deca.face_attr_mask[pi][0]:self.deca.face_attr_mask[pi][1]], [new_size, new_size], mode='bilinear') translated_detail_l1 = (translated_uv_texture_patch * uv_vis_mask_patch - uv_texture_gt_patch * uv_vis_mask_patch).abs().mean() * \ self.deca.config.sfsw[pi] if self.deca.config.use_detail_l1: losses['detail_translated_l1_{}'.format(pi)] = translated_detail_l1 else: metrics['detail_translated_l1_{}'.format(pi)] = translated_detail_l1 if self.deca.config.use_detail_mrf: translated_mrf = self.deca.perceptual_loss(translated_uv_texture_patch * uv_vis_mask_patch, uv_texture_gt_patch * uv_vis_mask_patch) * \ self.deca.config.sfsw[pi] * self.deca.config.mrfwr losses['detail_translated_mrf_{}'.format(pi)] = translated_mrf else: with torch.no_grad(): mrf = self.deca.perceptual_loss(translated_uv_texture_patch * uv_vis_mask_patch, uv_texture_gt_patch * uv_vis_mask_patch) * \ self.deca.config.sfsw[pi] * self.deca.config.mrfwr metrics['detail_translated_mrf_{}'.format(pi)] = mrf # Old piece of debug code. Good to delete. # if pi == 2: # uv_texture_gt_patch_ = uv_texture_gt_patch # uv_texture_patch_ = uv_texture_patch # uv_vis_mask_patch_ = uv_vis_mask_patch losses['z_reg'] = torch.mean(uv_z.abs()) * self.deca.config.zregw losses['z_diff'] = lossfunc.shading_smooth_loss(uv_shading) * self.deca.config.zdiffw nonvis_mask = (1 - util.binary_erosion(uv_vis_mask)) losses['z_sym'] = (nonvis_mask * (uv_z - torch.flip(uv_z, [-1]).detach()).abs()).sum() * self.deca.config.zsymw if self.emotion_mlp is not None:# and not testing: mlp_losses, mlp_metrics = self.emotion_mlp.compute_loss( codedict, batch, training=training, pred_prefix="emo_mlp_") for key in mlp_losses.keys(): if key in losses.keys(): raise RuntimeError(f"Duplicate loss label {key}") losses[key] = self.deca.config.mlp_emotion_predictor_weight * mlp_losses[key] for key in mlp_metrics.keys(): if key in metrics.keys(): raise RuntimeError(f"Duplicate metric label {key}") # let's report the metrics (which are a superset of losses when it comes to EmoMLP) without the weight, # it's hard to plot the metrics otherwise metrics[key] = mlp_metrics[key] # metrics[key] = self.deca.config.mlp_emotion_predictor_weight * mlp_metrics[key] # else: # uv_texture_gt_patch_ = None # uv_texture_patch_ = None # uv_vis_mask_patch_ = None return losses, metrics def compute_loss(self, values, batch, training=True, testing=False) -> dict: """ The function used to compute the loss on a training batch. : training should be set to true when calling from training_step only """ losses, metrics = self._compute_loss(values, batch, training=training, testing=testing) all_loss = 0. losses_key = losses.keys() for key in losses_key: all_loss = all_loss + losses[key] # losses['all_loss'] = all_loss losses = {'loss_' + key: value for key, value in losses.items()} # add prefix loss for better logging losses['loss'] = all_loss # add metrics that do not effect the loss function (if any) for key in metrics.keys(): losses['metric_' + key] = metrics[key] return losses def _val_to_be_logged(self, d): if not hasattr(self, 'val_dict_list'): self.val_dict_list = [] self.val_dict_list += [d] def _train_to_be_logged(self, d): if not hasattr(self, 'train_dict_list'): self.train_dict_list = [] self.train_dict_list += [d] def validation_step(self, batch, batch_idx, dataloader_idx=None): """ Training step override of pytorch lightning module. It makes the encoding, decoding passes, computes the loss and logs the losses/visualizations. :param batch: Batch of images to encode. 
batch['image'] [batch_size, ring_size, 3, image_size, image_size]. For a training forward pass, additional corresponding data are necessery such as 'landmarks' and 'masks'. :batch_idx batch index """ with torch.no_grad(): training = False values = self.encode(batch, training=training) values = self.decode(values, training=training) losses_and_metrics = self.compute_loss(values, batch, training=training) #### self.log_dict(losses_and_metrics, on_step=False, on_epoch=True) # prefix = str(self.mode.name).lower() prefix = self._get_logging_prefix() # if dataloader_idx is not None: # dataloader_str = str(dataloader_idx) + "_" # else: dataloader_str = '' stage_str = dataloader_str + 'val_' # losses_and_metrics_to_log = {prefix + dataloader_str +'_val_' + key: value.detach().cpu() for key, value in losses_and_metrics.items()} # losses_and_metrics_to_log = {prefix + '_' + stage_str + key: value.detach() for key, value in losses_and_metrics.items()} losses_and_metrics_to_log = {prefix + '_' + stage_str + key: value.detach().cpu().item() for key, value in losses_and_metrics.items()} losses_and_metrics_to_log[prefix + '_' + stage_str + 'epoch'] = self.current_epoch # losses_and_metrics_to_log[prefix + '_' + stage_str + 'epoch'] = torch.tensor(self.current_epoch, device=self.device) # log val_loss also without any prefix for a model checkpoint to track it losses_and_metrics_to_log[stage_str + 'loss'] = losses_and_metrics_to_log[prefix + '_' + stage_str + 'loss'] losses_and_metrics_to_log[prefix + '_' + stage_str + 'step'] = self.global_step losses_and_metrics_to_log[prefix + '_' + stage_str + 'batch_idx'] = batch_idx losses_and_metrics_to_log[stage_str + 'step'] = self.global_step losses_and_metrics_to_log[stage_str + 'batch_idx'] = batch_idx losses_and_metrics_to_log[prefix + '_' + stage_str + 'mem_usage'] = self.process.memory_info().rss losses_and_metrics_to_log[stage_str + 'mem_usage'] = self.process.memory_info().rss # self._val_to_be_logged(losses_and_metrics_to_log) if self.logger is not None: self.log_dict(losses_and_metrics_to_log, on_step=False, on_epoch=True, sync_dist=True) # log per epoch # recommended if self.trainer.is_global_zero: if self.deca.config.val_vis_frequency > 0: if batch_idx % self.deca.config.val_vis_frequency == 0: uv_detail_normals = None if 'uv_detail_normals' in values.keys(): uv_detail_normals = values['uv_detail_normals'] visualizations, grid_image = self._visualization_checkpoint(values['verts'], values['trans_verts'], values['ops'], uv_detail_normals, values, batch_idx, stage_str[:-1], prefix) vis_dict = self._create_visualizations_to_log(stage_str[:-1], visualizations, values, batch_idx, indices=0, dataloader_idx=dataloader_idx) # image = Image(grid_image, caption="full visualization") # vis_dict[prefix + '_val_' + "visualization"] = image if isinstance(self.logger, WandbLogger): self.logger.log_metrics(vis_dict) return None def _get_logging_prefix(self): prefix = self.stage_name + str(self.mode.name).lower() return prefix def test_step(self, batch, batch_idx, dataloader_idx=None): """ Testing step override of pytorch lightning module. It makes the encoding, decoding passes, computes the loss and logs the losses/visualizations without gradient :param batch: Batch of images to encode. batch['image'] [batch_size, ring_size, 3, image_size, image_size]. For a training forward pass, additional corresponding data are necessery such as 'landmarks' and 'masks'. 
:batch_idx batch index """ prefix = self._get_logging_prefix() losses_and_metrics_to_log = {} # if dataloader_idx is not None: # dataloader_str = str(dataloader_idx) + "_" # else: dataloader_str = '' stage_str = dataloader_str + 'test_' with torch.no_grad(): training = False testing = True values = self.encode(batch, training=training) values = self.decode(values, training=training) if 'mask' in batch.keys(): losses_and_metrics = self.compute_loss(values, batch, training=False, testing=testing) # losses_and_metrics_to_log = {prefix + '_' + stage_str + key: value.detach().cpu() for key, value in losses_and_metrics.items()} losses_and_metrics_to_log = {prefix + '_' + stage_str + key: value.detach().cpu().item() for key, value in losses_and_metrics.items()} else: losses_and_metric = None # losses_and_metrics_to_log[prefix + '_' + stage_str + 'epoch'] = self.current_epoch # losses_and_metrics_to_log[prefix + '_' + stage_str + 'epoch'] = torch.tensor(self.current_epoch, device=self.device) # losses_and_metrics_to_log[prefix + '_' + stage_str + 'step'] = torch.tensor(self.global_step, device=self.device) # losses_and_metrics_to_log[prefix + '_' + stage_str + 'batch_idx'] = torch.tensor(batch_idx, device=self.device) # losses_and_metrics_to_log[stage_str + 'epoch'] = torch.tensor(self.current_epoch, device=self.device) # losses_and_metrics_to_log[stage_str + 'step'] = torch.tensor(self.global_step, device=self.device) # losses_and_metrics_to_log[stage_str + 'batch_idx'] = torch.tensor(batch_idx, device=self.device) losses_and_metrics_to_log[prefix + '_' + stage_str + 'epoch'] = self.current_epoch losses_and_metrics_to_log[prefix + '_' + stage_str + 'step'] = self.global_step losses_and_metrics_to_log[prefix + '_' + stage_str + 'batch_idx'] = batch_idx losses_and_metrics_to_log[prefix + '_' + stage_str + 'mem_usage'] = self.process.memory_info().rss losses_and_metrics_to_log[stage_str + 'epoch'] = self.current_epoch losses_and_metrics_to_log[stage_str + 'step'] = self.global_step losses_and_metrics_to_log[stage_str + 'batch_idx'] = batch_idx losses_and_metrics_to_log[stage_str + 'mem_usage'] = self.process.memory_info().rss if self.logger is not None: # self.logger.log_metrics(losses_and_metrics_to_log) self.log_dict(losses_and_metrics_to_log, sync_dist=True, on_step=False, on_epoch=True) # if self.global_step % 200 == 0: uv_detail_normals = None if 'uv_detail_normals' in values.keys(): uv_detail_normals = values['uv_detail_normals'] if self.deca.config.test_vis_frequency > 0: # Log visualizations every once in a while if batch_idx % self.deca.config.test_vis_frequency == 0: # if self.trainer.is_global_zero: visualizations, grid_image = self._visualization_checkpoint(values['verts'], values['trans_verts'], values['ops'], uv_detail_normals, values, self.global_step, stage_str[:-1], prefix) visdict = self._create_visualizations_to_log(stage_str[:-1], visualizations, values, batch_idx, indices=0, dataloader_idx=dataloader_idx) self.logger.log_metrics(visdict) return None @property def process(self): if not hasattr(self,"process_"): self.process_ = psutil.Process(os.getpid()) return self.process_ def training_step(self, batch, batch_idx, *args, **kwargs): #, debug=True): """ Training step override of pytorch lightning module. It makes the encoding, decoding passes, computes the loss and logs the losses/visualizations. :param batch: Batch of images to encode. batch['image'] [batch_size, ring_size, 3, image_size, image_size]. 
For a training forward pass, additional corresponding data are necessery such as 'landmarks' and 'masks'. :batch_idx batch index """ values = self.encode(batch, training=True) values = self.decode(values, training=True) losses_and_metrics = self.compute_loss(values, batch, training=True) uv_detail_normals = None if 'uv_detail_normals' in values.keys(): uv_detail_normals = values['uv_detail_normals'] # prefix = str(self.mode.name).lower() prefix = self._get_logging_prefix() # losses_and_metrics_to_log = {prefix + '_train_' + key: value.detach().cpu() for key, value in losses_and_metrics.items()} # losses_and_metrics_to_log = {prefix + '_train_' + key: value.detach() for key, value in losses_and_metrics.items()} losses_and_metrics_to_log = {prefix + '_train_' + key: value.detach().cpu().item() for key, value in losses_and_metrics.items()} # losses_and_metrics_to_log[prefix + '_train_' + 'epoch'] = torch.tensor(self.current_epoch, device=self.device) losses_and_metrics_to_log[prefix + '_train_' + 'epoch'] = self.current_epoch losses_and_metrics_to_log[prefix + '_train_' + 'step'] = self.global_step losses_and_metrics_to_log[prefix + '_train_' + 'batch_idx'] = batch_idx losses_and_metrics_to_log[prefix + '_' + "train_" + 'mem_usage'] = self.process.memory_info().rss # losses_and_metrics_to_log['train_' + 'epoch'] = torch.tensor(self.current_epoch, device=self.device) losses_and_metrics_to_log['train_' + 'epoch'] = self.current_epoch losses_and_metrics_to_log['train_' + 'step'] = self.global_step losses_and_metrics_to_log['train_' + 'batch_idx'] = batch_idx losses_and_metrics_to_log["train_" + 'mem_usage'] = self.process.memory_info().rss # log loss also without any prefix for a model checkpoint to track it losses_and_metrics_to_log['loss'] = losses_and_metrics_to_log[prefix + '_train_loss'] if self.logger is not None: self.log_dict(losses_and_metrics_to_log, on_step=False, on_epoch=True, sync_dist=True) # log per epoch, # recommended if self.deca.config.train_vis_frequency > 0: if self.global_step % self.deca.config.train_vis_frequency == 0: if self.trainer.is_global_zero: visualizations, grid_image = self._visualization_checkpoint(values['verts'], values['trans_verts'], values['ops'], uv_detail_normals, values, batch_idx, "train", prefix) visdict = self._create_visualizations_to_log('train', visualizations, values, batch_idx, indices=0) if isinstance(self.logger, WandbLogger): self.logger.log_metrics(visdict)#, step=self.global_step) # self.log_dict(visdict, sync_dist=True) # self.log_dict(losses_and_metrics_to_log, on_step=True, on_epoch=False) # log per step # self.log_dict(losses_and_metrics_to_log, on_step=True, on_epoch=True) # log per both # return losses_and_metrics return losses_and_metrics['loss'] ### STEP ENDS ARE PROBABLY NOT NECESSARY BUT KEEP AN EYE ON THEM IF MULI-GPU TRAINING DOESN'T WORK # def training_step_end(self, batch_parts): # return self._step_end(batch_parts) # # def validation_step_end(self, batch_parts): # return self._step_end(batch_parts) # # def _step_end(self, batch_parts): # # gpu_0_prediction = batch_parts.pred[0]['pred'] # # gpu_1_prediction = batch_parts.pred[1]['pred'] # N = len(batch_parts) # loss_dict = {} # for key in batch_parts[0]: # for i in range(N): # if key not in loss_dict.keys(): # loss_dict[key] = batch_parts[i] # else: # loss_dict[key] = batch_parts[i] # loss_dict[key] = loss_dict[key] / N # return loss_dict def vae_2_str(self, valence=None, arousal=None, affnet_expr=None, expr7=None, prefix=""): caption = "" if len(prefix) > 0: prefix += "_" if 
valence is not None and not np.isnan(valence).any(): caption += prefix + "valence= %.03f\n" % valence if arousal is not None and not np.isnan(arousal).any(): caption += prefix + "arousal= %.03f\n" % arousal if affnet_expr is not None and not np.isnan(affnet_expr).any(): caption += prefix + "expression= %s \n" % AffectNetExpressions(affnet_expr).name if expr7 is not None and not np.isnan(expr7).any(): caption += prefix +"expression= %s \n" % Expression7(expr7).name return caption def _create_visualizations_to_log(self, stage, visdict, values, step, indices=None, dataloader_idx=None, output_dir=None): mode_ = str(self.mode.name).lower() prefix = self._get_logging_prefix() output_dir = output_dir or self.inout_params.full_run_dir log_dict = {} for key in visdict.keys(): images = _torch_image2np(visdict[key]) if images.dtype == np.float32 or images.dtype == np.float64 or images.dtype == np.float16: images = np.clip(images, 0, 1) if indices is None: indices = np.arange(images.shape[0]) if isinstance(indices, int): indices = [indices,] if isinstance(indices, str) and indices == 'all': image = np.concatenate([images[i] for i in range(images.shape[0])], axis=1) savepath = Path(f'{output_dir}/{prefix}_{stage}/{key}/{self.current_epoch:04d}_{step:04d}_all.png') # im2log = Image(image, caption=key) if isinstance(self.logger, WandbLogger): im2log = _log_wandb_image(savepath, image) else: im2log = _log_array_image(savepath, image) name = prefix + "_" + stage + "_" + key if dataloader_idx is not None: name += "/dataloader_idx_" + str(dataloader_idx) log_dict[name] = im2log else: for i in indices: caption = key + f" batch_index={step}\n" caption += key + f" index_in_batch={i}\n" if self.emonet_loss is not None: if key == 'inputs': if mode_ + "_valence_input" in values.keys(): caption += self.vae_2_str( values[mode_ + "_valence_input"][i].detach().cpu().item(), values[mode_ + "_arousal_input"][i].detach().cpu().item(), np.argmax(values[mode_ + "_expression_input"][i].detach().cpu().numpy()), prefix="emonet") + "\n" if 'va' in values.keys() and mode_ + "valence_gt" in values.keys(): # caption += self.vae_2_str( # values[mode_ + "_valence_gt"][i].detach().cpu().item(), # values[mode_ + "_arousal_gt"][i].detach().cpu().item(), caption += self.vae_2_str( values[mode_ + "valence_gt"][i].detach().cpu().item(), values[mode_ + "arousal_gt"][i].detach().cpu().item(), prefix="gt") + "\n" if 'expr7' in values.keys() and mode_ + "_expression_gt" in values.keys(): caption += "\n" + self.vae_2_str( expr7=values[mode_ + "_expression_gt"][i].detach().cpu().numpy(), prefix="gt") + "\n" if 'affectnetexp' in values.keys() and mode_ + "_expression_gt" in values.keys(): caption += "\n" + self.vae_2_str( affnet_expr=values[mode_ + "_expression_gt"][i].detach().cpu().numpy(), prefix="gt") + "\n" elif 'geometry_detail' in key: if "emo_mlp_valence" in values.keys(): caption += self.vae_2_str( values["emo_mlp_valence"][i].detach().cpu().item(), values["emo_mlp_arousal"][i].detach().cpu().item(), prefix="mlp") if 'emo_mlp_expr_classification' in values.keys(): caption += "\n" + self.vae_2_str( affnet_expr=values["emo_mlp_expr_classification"][i].detach().cpu().argmax().numpy(), prefix="mlp") + "\n" elif key == 'output_images_' + mode_: if mode_ + "_valence_output" in values.keys(): caption += self.vae_2_str(values[mode_ + "_valence_output"][i].detach().cpu().item(), values[mode_ + "_arousal_output"][i].detach().cpu().item(), np.argmax(values[mode_ + "_expression_output"][i].detach().cpu().numpy())) + "\n" elif key == 
'output_translated_images_' + mode_: if mode_ + "_translated_valence_output" in values.keys(): caption += self.vae_2_str(values[mode_ + "_translated_valence_output"][i].detach().cpu().item(), values[mode_ + "_translated_arousal_output"][i].detach().cpu().item(), np.argmax(values[mode_ + "_translated_expression_output"][i].detach().cpu().numpy())) + "\n" # elif key == 'output_images_detail': # caption += "\n" + self.vae_2_str(values["detail_output_valence"][i].detach().cpu().item(), # values["detail_output_valence"][i].detach().cpu().item(), # np.argmax(values["detail_output_expression"][ # i].detach().cpu().numpy())) savepath = Path(f'{output_dir}/{prefix}_{stage}/{key}/{self.current_epoch:04d}_{step:04d}_{i:02d}.png') image = images[i] # im2log = Image(image, caption=caption) if isinstance(self.logger, WandbLogger): im2log = _log_wandb_image(savepath, image, caption) elif self.logger is not None: im2log = _log_array_image(savepath, image, caption) else: im2log = _log_array_image(None, image, caption) name = prefix + "_" + stage + "_" + key if dataloader_idx is not None: name += "/dataloader_idx_" + str(dataloader_idx) log_dict[name] = im2log return log_dict def _visualization_checkpoint(self, verts, trans_verts, ops, uv_detail_normals, additional, batch_idx, stage, prefix, save=False): batch_size = verts.shape[0] visind = np.arange(batch_size) shape_images = self.deca.render.render_shape(verts, trans_verts) if uv_detail_normals is not None: detail_normal_images = F.grid_sample(uv_detail_normals.detach(), ops['grid'].detach(), align_corners=False) shape_detail_images = self.deca.render.render_shape(verts, trans_verts, detail_normal_images=detail_normal_images) else: shape_detail_images = None visdict = {} if 'images' in additional.keys(): visdict['inputs'] = additional['images'][visind] if 'images' in additional.keys() and 'lmk' in additional.keys(): visdict['landmarks_gt'] = util.tensor_vis_landmarks(additional['images'][visind], additional['lmk'][visind]) if 'images' in additional.keys() and 'predicted_landmarks' in additional.keys(): visdict['landmarks_predicted'] = util.tensor_vis_landmarks(additional['images'][visind], additional['predicted_landmarks'][visind]) if 'predicted_images' in additional.keys(): visdict['output_images_coarse'] = additional['predicted_images'][visind] if 'predicted_translated_image' in additional.keys() and additional['predicted_translated_image'] is not None: visdict['output_translated_images_coarse'] = additional['predicted_translated_image'][visind] visdict['geometry_coarse'] = shape_images[visind] if shape_detail_images is not None: visdict['geometry_detail'] = shape_detail_images[visind] if 'albedo_images' in additional.keys(): visdict['albedo_images'] = additional['albedo_images'][visind] if 'masks' in additional.keys(): visdict['mask'] = additional['masks'].repeat(1, 3, 1, 1)[visind] if 'albedo' in additional.keys(): visdict['albedo'] = additional['albedo'][visind] if 'predicted_detailed_image' in additional.keys() and additional['predicted_detailed_image'] is not None: visdict['output_images_detail'] = additional['predicted_detailed_image'][visind] if 'predicted_detailed_translated_image' in additional.keys() and additional['predicted_detailed_translated_image'] is not None: visdict['output_translated_images_detail'] = additional['predicted_detailed_translated_image'][visind] if 'shape_detail_images' in additional.keys(): visdict['shape_detail_images'] = additional['shape_detail_images'][visind] if 'uv_detail_normals' in additional.keys(): 
visdict['uv_detail_normals'] = additional['uv_detail_normals'][visind] * 0.5 + 0.5 if 'uv_texture_patch' in additional.keys(): visdict['uv_texture_patch'] = additional['uv_texture_patch'][visind] if 'uv_texture_gt' in additional.keys(): visdict['uv_texture_gt'] = additional['uv_texture_gt'][visind] if 'translated_uv_texture' in additional.keys() and additional['translated_uv_texture'] is not None: visdict['translated_uv_texture'] = additional['translated_uv_texture'][visind] if 'uv_vis_mask_patch' in additional.keys(): visdict['uv_vis_mask_patch'] = additional['uv_vis_mask_patch'][visind] if save: savepath = f'{self.inout_params.full_run_dir}/{prefix}_{stage}/combined/{self.current_epoch:04d}_{batch_idx:04d}.png' Path(savepath).parent.mkdir(exist_ok=True, parents=True) visualization_image = self.deca.visualize(visdict, savepath) return visdict, visualization_image[..., [2, 1, 0]] else: visualization_image = None return visdict, None def _get_trainable_parameters(self): trainable_params = [] if self.mode == DecaMode.COARSE: trainable_params += self.deca._get_coarse_trainable_parameters() elif self.mode == DecaMode.DETAIL: trainable_params += self.deca._get_detail_trainable_parameters() else: raise ValueError(f"Invalid deca mode: {self.mode}") if self.emotion_mlp is not None: trainable_params += list(self.emotion_mlp.parameters()) if self.emonet_loss is not None: trainable_params += self.emonet_loss._get_trainable_params() if self.deca.id_loss is not None: trainable_params += self.deca.id_loss._get_trainable_params() return trainable_params def configure_optimizers(self): # optimizer = torch.optim.Adam(self.parameters(), lr=1e-3) print("Configuring optimizer") trainable_params = self._get_trainable_parameters() if self.learning_params.optimizer == 'Adam': self.deca.opt = torch.optim.Adam( trainable_params, lr=self.learning_params.learning_rate, amsgrad=False) elif self.config.learning.optimizer == 'AdaBound': self.deca.opt = adabound.AdaBound( trainable_params, lr=self.config.learning.learning_rate, final_lr=self.config.learning.final_learning_rate ) elif self.learning_params.optimizer == 'SGD': self.deca.opt = torch.optim.SGD( trainable_params, lr=self.learning_params.learning_rate) else: raise ValueError(f"Unsupported optimizer: '{self.learning_params.optimizer}'") optimizers = [self.deca.opt] schedulers = [] if 'learning_rate_decay' in self.learning_params.keys(): scheduler = torch.optim.lr_scheduler.ExponentialLR(self.deca.opt, gamma=self.learning_params.learning_rate_decay) schedulers += [scheduler] if len(schedulers) == 0: return self.deca.opt return optimizers, schedulers class DECA(torch.nn.Module): """ The original DECA class which contains the encoders, FLAME decoder and the detail decoder. 
""" def __init__(self, config): """ :config corresponds to a model_params from DecaModule """ super().__init__() # ID-MRF perceptual loss (kept here from the original DECA implementation) self.perceptual_loss = None # Face Recognition loss self.id_loss = None # VGG feature loss self.vgg_loss = None self._reconfigure(config) self._reinitialize() def _dirty_init(self): pass # not used here, implemented for EMICA def get_input_image_size(self): return (self.config.image_size, self.config.image_size) def _reconfigure(self, config): self.config = config self.n_param = config.n_shape + config.n_tex + config.n_exp + config.n_pose + config.n_cam + config.n_light # identity-based detail code self.n_detail = config.n_detail # emotion-based detail code (deprecated, not use by DECA or EMOCA) self.n_detail_emo = config.n_detail_emo if 'n_detail_emo' in config.keys() else 0 # count the size of the conidition vector if 'detail_conditioning' in self.config.keys(): self.n_cond = 0 if 'globalpose' in self.config.detail_conditioning: self.n_cond += 3 if 'jawpose' in self.config.detail_conditioning: self.n_cond += 3 if 'identity' in self.config.detail_conditioning: self.n_cond += config.n_shape if 'expression' in self.config.detail_conditioning: self.n_cond += config.n_exp else: self.n_cond = 3 + config.n_exp self.mode = DecaMode[str(config.mode).upper()] self._create_detail_generator() self._init_deep_losses() self._setup_neural_rendering() def _reinitialize(self): self._create_model() self._setup_renderer() self._init_deep_losses() self.face_attr_mask = util.load_local_mask(image_size=self.config.uv_size, mode='bbx') def _get_num_shape_params(self): return self.config.n_shape def _init_deep_losses(self): """ Initialize networks for deep losses """ # TODO: ideally these networks should be moved out the DECA class and into DecaModule, # but that would break backwards compatility with the original DECA and would not be able to load DECA's weights if 'mrfwr' not in self.config.keys() or self.config.mrfwr == 0: self.perceptual_loss = None else: if self.perceptual_loss is None: self.perceptual_loss = lossfunc.IDMRFLoss().eval() self.perceptual_loss.requires_grad_(False) # TODO, move this to the constructor if 'idw' not in self.config.keys() or self.config.idw == 0: self.id_loss = None else: if self.id_loss is None: id_metric = self.config.id_metric if 'id_metric' in self.config.keys() else None id_trainable = self.config.id_trainable if 'id_trainable' in self.config.keys() else False self.id_loss_start_step = self.config.id_loss_start_step if 'id_loss_start_step' in self.config.keys() else 0 self.id_loss = lossfunc.VGGFace2Loss(self.config.pretrained_vgg_face_path, id_metric, id_trainable) self.id_loss.freeze_nontrainable_layers() if 'vggw' not in self.config.keys() or self.config.vggw == 0: self.vgg_loss = None else: if self.vgg_loss is None: vgg_loss_batch_norm = 'vgg_loss_batch_norm' in self.config.keys() and self.config.vgg_loss_batch_norm self.vgg_loss = VGG19Loss(dict(zip(self.config.vgg_loss_layers, self.config.lambda_vgg_layers)), batch_norm=vgg_loss_batch_norm).eval() self.vgg_loss.requires_grad_(False) # TODO, move this to the constructor def _setup_renderer(self): self.render = SRenderY(self.config.image_size, obj_filename=self.config.topology_path, uv_size=self.config.uv_size) # .to(self.device) # face mask for rendering details mask = imread(self.config.face_mask_path).astype(np.float32) / 255. 
mask = torch.from_numpy(mask[:, :, 0])[None, None, :, :].contiguous() self.uv_face_mask = F.interpolate(mask, [self.config.uv_size, self.config.uv_size]) mask = imread(self.config.face_eye_mask_path).astype(np.float32) / 255. mask = torch.from_numpy(mask[:, :, 0])[None, None, :, :].contiguous() uv_face_eye_mask = F.interpolate(mask, [self.config.uv_size, self.config.uv_size]) self.register_buffer('uv_face_eye_mask', uv_face_eye_mask) # displacement mask is deprecated and not used by DECA or EMOCA if 'displacement_mask' in self.config.keys(): displacement_mask_ = 1-np.load(self.config.displacement_mask).astype(np.float32) # displacement_mask_ = np.load(self.config.displacement_mask).astype(np.float32) displacement_mask_ = torch.from_numpy(displacement_mask_)[None, None, ...].contiguous() displacement_mask_ = F.interpolate(displacement_mask_, [self.config.uv_size, self.config.uv_size]) self.register_buffer('displacement_mask', displacement_mask_) ## displacement correct if os.path.isfile(self.config.fixed_displacement_path): fixed_dis = np.load(self.config.fixed_displacement_path) fixed_uv_dis = torch.tensor(fixed_dis).float() else: fixed_uv_dis = torch.zeros([512, 512]).float() print("Warning: fixed_displacement_path not found, using zero displacement") self.register_buffer('fixed_uv_dis', fixed_uv_dis) def uses_texture(self): if 'use_texture' in self.config.keys(): return self.config.use_texture return True # true by default def _disable_texture(self, remove_from_model=False): self.config.use_texture = False if remove_from_model: self.flametex = None def _enable_texture(self): self.config.use_texture = True def _has_neural_rendering(self): return hasattr(self.config, "neural_renderer") and bool(self.config.neural_renderer) def _setup_neural_rendering(self): if self._has_neural_rendering(): if self.config.neural_renderer.class_ == "StarGAN": print("Creating StarGAN neural renderer") self.image_translator = StarGANWrapper(self.config.neural_renderer.cfg, self.config.neural_renderer.stargan_repo) else: raise ValueError(f"Unsupported neural renderer class '{self.config.neural_renderer.class_}'") if self.image_translator.background_mode == "input": if self.config.background_from_input not in [True, "input"]: raise NotImplementedError("The background mode of the neural renderer and deca is not synchronized. " "Background should be inpainted from the input") elif self.image_translator.background_mode == "black": if self.config.background_from_input not in [False, "black"]: raise NotImplementedError("The background mode of the neural renderer and deca is not synchronized. " "Background should be black.") elif self.image_translator.background_mode == "none": if self.config.background_from_input not in ["none"]: raise NotImplementedError("The background mode of the neural renderer and deca is not synchronized. 
" "The background should not be handled") else: raise NotImplementedError(f"Unsupported mode of the neural renderer backroungd: " f"'{self.image_translator.background_mode}'") def _create_detail_generator(self): #backwards compatibility hack: if hasattr(self, 'D_detail'): if (not "detail_conditioning_type" in self.config.keys() or self.config.detail_conditioning_type == "concat") \ and isinstance(self.D_detail, Generator): return if self.config.detail_conditioning_type == "adain" and isinstance(self.D_detail, GeneratorAdaIn): return print("[WARNING]: We are reinitializing the detail generator!") del self.D_detail # just to make sure we free the CUDA memory, probably not necessary if not "detail_conditioning_type" in self.config.keys() or str(self.config.detail_conditioning_type).lower() == "concat": # concatenates detail latent and conditioning (this one is used by DECA/EMOCA) print("Creating classic detail generator.") self.D_detail = Generator(latent_dim=self.n_detail + self.n_detail_emo + self.n_cond, out_channels=1, out_scale=0.01, sample_mode='bilinear') elif str(self.config.detail_conditioning_type).lower() == "adain": # conditioning passed in through adain layers (this one is experimental and not currently used) print("Creating AdaIn detail generator.") self.D_detail = GeneratorAdaIn(self.n_detail + self.n_detail_emo, self.n_cond, out_channels=1, out_scale=0.01, sample_mode='bilinear') else: raise NotImplementedError(f"Detail conditioning invalid: '{self.config.detail_conditioning_type}'") def _create_model(self): # 1) build coarse encoder e_flame_type = 'ResnetEncoder' if 'e_flame_type' in self.config.keys(): e_flame_type = self.config.e_flame_type if e_flame_type == 'ResnetEncoder': self.E_flame = ResnetEncoder(outsize=self.n_param) elif e_flame_type[:4] == 'swin': self.E_flame = SwinEncoder(outsize=self.n_param, img_size=self.config.image_size, swin_type=e_flame_type) else: raise ValueError(f"Invalid 'e_flame_type' = {e_flame_type}") flame_cfg = copy.deepcopy(self.config) flame_cfg.n_shape = self._get_num_shape_params() if 'flame_mediapipe_lmk_embedding_path' not in flame_cfg.keys(): self.flame = FLAME(flame_cfg) else: self.flame = FLAME_mediapipe(flame_cfg) if self.uses_texture(): self.flametex = FLAMETex(self.config) else: self.flametex = None # 2) build detail encoder e_detail_type = 'ResnetEncoder' if 'e_detail_type' in self.config.keys(): e_detail_type = self.config.e_detail_type if e_detail_type == 'ResnetEncoder': self.E_detail = ResnetEncoder(outsize=self.n_detail + self.n_detail_emo) elif e_flame_type[:4] == 'swin': self.E_detail = SwinEncoder(outsize=self.n_detail + self.n_detail_emo, img_size=self.config.image_size, swin_type=e_detail_type) else: raise ValueError(f"Invalid 'e_detail_type'={e_detail_type}") self._create_detail_generator() # self._load_old_checkpoint() def _get_coarse_trainable_parameters(self): print("Add E_flame.parameters() to the optimizer") return list(self.E_flame.parameters()) def _get_detail_trainable_parameters(self): trainable_params = [] if self.config.train_coarse: trainable_params += self._get_coarse_trainable_parameters() print("Add E_flame.parameters() to the optimizer") trainable_params += list(self.E_detail.parameters()) print("Add E_detail.parameters() to the optimizer") trainable_params += list(self.D_detail.parameters()) print("Add D_detail.parameters() to the optimizer") return trainable_params def train(self, mode: bool = True): super().train(mode) if mode: if self.mode == DecaMode.COARSE: self.E_flame.train() # print("Setting 
E_flame to train") self.E_detail.eval() # print("Setting E_detail to eval") self.D_detail.eval() # print("Setting D_detail to eval") elif self.mode == DecaMode.DETAIL: if self.config.train_coarse: # print("Setting E_flame to train") self.E_flame.train() else: # print("Setting E_flame to eval") self.E_flame.eval() self.E_detail.train() # print("Setting E_detail to train") self.D_detail.train() # print("Setting D_detail to train") else: raise ValueError(f"Invalid mode '{self.mode}'") else: self.E_flame.eval() # print("Setting E_flame to eval") self.E_detail.eval() # print("Setting E_detail to eval") self.D_detail.eval() # print("Setting D_detail to eval") # these are set to eval no matter what, they're never being trained (the FLAME shape and texture spaces are pretrained) self.flame.eval() if self.flametex is not None: self.flametex.eval() return self def _load_old_checkpoint(self): """ Loads the DECA model weights from the original DECA implementation: https://github.com/YadiraF/DECA """ if self.config.resume_training: model_path = self.config.pretrained_modelpath print(f"Loading model state from '{model_path}'") checkpoint = torch.load(model_path) # model util.copy_state_dict(self.E_flame.state_dict(), checkpoint['E_flame']) # util.copy_state_dict(self.opt.state_dict(), checkpoint['opt']) # deprecate # detail model if 'E_detail' in checkpoint.keys(): util.copy_state_dict(self.E_detail.state_dict(), checkpoint['E_detail']) util.copy_state_dict(self.D_detail.state_dict(), checkpoint['D_detail']) # training state self.start_epoch = 0 # checkpoint['epoch'] self.start_iter = 0 # checkpoint['iter'] else: print('Start training from scratch') self.start_epoch = 0 self.start_iter = 0 def _encode_flame(self, images, **kwargs): return self.E_flame(images) def decompose_code(self, code): ''' config.n_shape + config.n_tex + config.n_exp + config.n_pose + config.n_cam + config.n_light ''' code_list = [] # num_list = [self.config.n_shape, self.config.n_tex, self.config.n_exp, self.config.n_pose, self.config.n_cam, # self.config.n_light] num_list = [self._get_num_shape_params(), self.config.n_tex, self.config.n_exp, self.config.n_pose, self.config.n_cam, self.config.n_light] start = 0 for i in range(len(num_list)): code_list.append(code[:, start:start + num_list[i]]) start = start + num_list[i] # shapecode, texcode, expcode, posecode, cam, lightcode = code_list code_list[-1] = code_list[-1].reshape(code.shape[0], 9, 3) return code_list, None def displacement2normal(self, uv_z, coarse_verts, coarse_normals, detach=True): """ Converts the displacement uv map (uv_z) and coarse_verts to a normal map coarse_normals. 
""" batch_size = uv_z.shape[0] uv_coarse_vertices = self.render.world2uv(coarse_verts)#.detach() if detach: uv_coarse_vertices = uv_coarse_vertices.detach() uv_coarse_normals = self.render.world2uv(coarse_normals)#.detach() if detach: uv_coarse_normals = uv_coarse_normals.detach() uv_z = uv_z * self.uv_face_eye_mask # detail vertices = coarse vertice + predicted displacement*normals + fixed displacement*normals uv_detail_vertices = uv_coarse_vertices + \ uv_z * uv_coarse_normals + \ self.fixed_uv_dis[None, None, :,:] * uv_coarse_normals #.detach() dense_vertices = uv_detail_vertices.permute(0, 2, 3, 1).reshape([batch_size, -1, 3]) uv_detail_normals = util.vertex_normals(dense_vertices, self.render.dense_faces.expand(batch_size, -1, -1)) uv_detail_normals = uv_detail_normals.reshape( [batch_size, uv_coarse_vertices.shape[2], uv_coarse_vertices.shape[3], 3]).permute(0, 3, 1, 2) # uv_detail_normals = uv_detail_normals*self.uv_face_eye_mask + uv_coarse_normals*(1-self.uv_face_eye_mask) # uv_detail_normals = util.gaussian_blur(uv_detail_normals) return uv_detail_normals, uv_coarse_vertices def visualize(self, visdict, savepath, catdim=1): grids = {} for key in visdict: # print(key) if visdict[key] is None: continue grids[key] = torchvision.utils.make_grid( F.interpolate(visdict[key], [self.config.image_size, self.config.image_size])).detach().cpu() grid = torch.cat(list(grids.values()), catdim) grid_image = (grid.numpy().transpose(1, 2, 0).copy() * 255)[:, :, [2, 1, 0]] grid_image = np.minimum(np.maximum(grid_image, 0), 255).astype(np.uint8) if savepath is not None: cv2.imwrite(savepath, grid_image) return grid_image def create_mesh(self, opdict, dense_template): ''' vertices: [nv, 3], tensor texture: [3, h, w], tensor ''' i = 0 vertices = opdict['verts'][i].cpu().numpy() faces = self.render.faces[0].cpu().numpy() if 'uv_texture_gt' in opdict.keys(): texture = util.tensor2image(opdict['uv_texture_gt'][i]) else: texture = None uvcoords = self.render.raw_uvcoords[0].cpu().numpy() uvfaces = self.render.uvfaces[0].cpu().numpy() # save coarse mesh, with texture and normal map if 'uv_detail_normals' in opdict.keys(): normal_map = util.tensor2image(opdict['uv_detail_normals'][i]*0.5 + 0.5) # upsample mesh, save detailed mesh texture = texture[:, :, [2, 1, 0]] normals = opdict['normals'][i].cpu().numpy() displacement_map = opdict['displacement_map'][i].detach().cpu().numpy().squeeze() dense_vertices, dense_colors, dense_faces = util.upsample_mesh(vertices, normals, faces, displacement_map, texture, dense_template) else: normal_map = None dense_vertices = None dense_colors = None dense_faces = None return vertices, faces, texture, uvcoords, uvfaces, normal_map, dense_vertices, dense_faces, dense_colors def save_obj(self, filename, opdict, dense_template, mode ='detail'): if mode not in ['coarse', 'detail', 'both']: raise ValueError(f"Invalid mode '{mode}. 
Expected modes are: 'coarse', 'detail', 'both'") vertices, faces, texture, uvcoords, uvfaces, normal_map, dense_vertices, dense_faces, dense_colors \ = self.create_mesh(opdict, dense_template) if mode == 'both': if isinstance(filename, list): filename_coarse = filename[0] filename_detail = filename[1] else: filename_coarse = filename filename_detail = filename.replace('.obj', '_detail.obj') elif mode == 'coarse': filename_coarse = filename else: filename_detail = filename if mode in ['coarse', 'both']: util.write_obj(str(filename_coarse), vertices, faces, texture=texture, uvcoords=uvcoords, uvfaces=uvfaces, normal_map=normal_map) if mode in ['detail', 'both']: util.write_obj(str(filename_detail), dense_vertices, dense_faces, colors = dense_colors, inverse_face_order=True) class ExpDECAInterface(object): """ This serves as an interface for EMOCA-like classes that need to use a different sub class but retain the EMOCA functionality. See EMICA_v2 for an example. """ def _create_model(self): # E_flame should be fixed for expression EMOCA self.E_flame.requires_grad_(False) # 2) add expression decoder if self.config.expression_backbone == 'deca_parallel': ## a) Attach a parallel flow of FCs onto the original DECA coarse backbone. (Only the second FC head is trainable) self.E_expression = SecondHeadResnet(self.E_flame, self.n_exp_param, 'same') elif self.config.expression_backbone == 'deca_clone': ## b) Clones the original DECA coarse decoder (and the entire decoder will be trainable) - This is in final EMOCA. #TODO this will only work for Resnet. Make this work for the other backbones (Swin) as well. self.E_expression = ResnetEncoder(self.n_exp_param) # clone parameters of the ResNet self.E_expression.encoder.load_state_dict(self.E_flame.encoder.state_dict()) elif self.config.expression_backbone == 'emonet_trainable': # Trainable EmoNet instead of Resnet (deprecated) self.E_expression = EmoNetRegressor(self.n_exp_param) elif self.config.expression_backbone == 'emonet_static': # Frozen EmoNet with a trainable head instead of Resnet (deprecated)
self.E_expression = EmonetRegressorStatic(self.n_exp_param)
22
2023-11-07 20:13:32+00:00
24k
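The `compute_loss` method in the record above folds every entry of the loss dict into one scalar and re-keys the dict for logging: optimized terms get a `loss_` prefix, tracked-only terms a `metric_` prefix. A minimal standalone sketch of that aggregation pattern; the term names and values below are hypothetical, for illustration only:

```python
import torch

def aggregate_losses(losses: dict, metrics: dict) -> dict:
    """Mirror of the compute_loss aggregation: sum all loss terms into one
    scalar, prefix keys for logging, and carry metrics along without
    letting them affect the optimized total."""
    out = {'loss_' + key: value for key, value in losses.items()}
    out['loss'] = sum(losses.values())
    for key, value in metrics.items():
        out['metric_' + key] = value  # logged, but excluded from the total
    return out

# Hypothetical term names and values.
losses = {'photometric_texture': torch.tensor(0.12), 'shape_reg': torch.tensor(0.01)}
metrics = {'landmark_error': torch.tensor(0.05)}
print(aggregate_losses(losses, metrics)['loss'])  # tensor(0.1300)
```

Keeping metrics in the same dict but outside the summed total is what lets the Lightning logging calls above plot both kinds of values without the metrics contributing gradients.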
hxz393/ConfigCenterComparer
ui/action_start.py
[ { "identifier": "COL_INFO", "path": "config/settings.py", "snippet": "COL_INFO = {\n \"name\": {\"col\": 0},\n \"group\": {\"col\": 1},\n \"key\": {\"col\": 2},\n \"pro_value\": {\"col\": 3},\n \"pro_time\": {\"col\": 4},\n \"pre_value\": {\"col\": 5},\n \"pre_time\": {\"col\": 6},\...
import logging from typing import Dict, List from PyQt5.QtCore import Qt, QThread, pyqtSignal, QObject from PyQt5.QtGui import QIcon from PyQt5.QtWidgets import QAction, QHeaderView from config.settings import COL_INFO from lib.get_resource_path import get_resource_path from module.execute_queries import execute_queries from ui.config_manager import ConfigManager from ui.filter_bar import FilterBar from ui.lang_manager import LangManager from ui.message_show import message_show from ui.table_main import TableMain
15,024
def initialize(self) -> None: """ Initialize the UI and state; executed before the start action runs. This method sets the initial state of UI elements, e.g. disabling buttons and clearing the table. :rtype: None :return: No return value. """ logger.info('Start running') # Send a hint message to the status bar self.status_updated.emit(self.lang['ui.action_start_3']) # Make the start button unclickable self.action_start.setEnabled(False) # Disable table sorting self.table.setSortingEnabled(False) # Disable table updates self.table.setUpdatesEnabled(False) # Disable filter bar widgets self.filter_bar.filter_app_box.setEnabled(False) self.filter_bar.filter_table_box.setEnabled(False) self.filter_bar.filter_table_check_box.setEnabled(False) self.filter_bar.filter_value_box.setEnabled(False) self.filter_bar.filter_value_button.setEnabled(False) self.filter_bar.filter_reset_button.setEnabled(False) # Clear table data self.table.clear() # Initialize column widths self.table.set_header_resize() logger.debug('Initialization finished') def table_insert(self, table_rows: List[List[List[str]]]) -> None: """ Insert query results into the main table. This method takes query results as input, formats them, and inserts them into the application's main table. Each element is a triple-nested list representing one row of table data. :param table_rows: Table data to insert; each element represents one row. :type table_rows: List[List[List[str]]] :rtype: None :return: No return value. """ for row in table_rows: self.table.add_row(row) logger.debug('Table filling finished.') def table_column_hide(self, query_statuses: Dict[str, bool]) -> None: """ Decide whether to hide certain table columns based on the query statuses. :param query_statuses: Dict of query statuses; keys are environment names, values are booleans indicating whether the environment is enabled. :type query_statuses: Dict[str, bool] :rtype: None :return: No return value. """ # env_name looks like 'PRO_CONFIG', keys in COL_INFO look like 'pro_value', env_switch is a boolean. for env_name, env_switch in query_statuses.items(): # Map env_name to the corresponding key in COL_INFO column_name_mapping = {'PRO_CONFIG': 'pro_value', 'PRE_CONFIG': 'pre_value', 'TEST_CONFIG': 'test_value', 'DEV_CONFIG': 'dev_value'} # Get the column index col = COL_INFO[column_name_mapping[env_name]]['col'] # Show or hide the column according to the environment switch. self.table.showColumn(col) if env_switch else self.table.hideColumn(col) def finalize(self) -> None: """ Wrap-up work after the query completes, including re-enabling table sorting and updates. :rtype: None :return: No return value. """ # Apply colors and filters first self.table.apply_color_to_table() self.filter_bar.filter_table() # Enable sorting self.table.setSortingEnabled(True) # Sort by the first column in ascending order by default self.table.sortByColumn(0, Qt.AscendingOrder) # Allow the user to resize columns self.table.horizontalHeader().setSectionResizeMode(QHeaderView.Interactive) # Update the filters; insert values into the service filter self.filter_bar.filter_options_add() # Apply the filter self.filter_bar.highlight_rows.clear() self.filter_bar.filter_table() # Enable table updates self.table.setUpdatesEnabled(True) # Enable filter bar widgets self.filter_bar.filter_app_box.setEnabled(True) self.filter_bar.filter_table_box.setEnabled(True) self.filter_bar.filter_table_check_box.setEnabled(True) self.filter_bar.filter_value_box.setEnabled(True) self.filter_bar.filter_value_button.setEnabled(True) self.filter_bar.filter_reset_button.setEnabled(True) def show_result_message(self, result: str) -> None: """ Show the result message. Depending on the run result, show a different status message or error information. :param result: Description of the run result. :type result: str :rtype: None :return: No return value. """ self.action_start.setEnabled(True) if result == 'done': logger.info('Run Completed') else: message = { 'no query result': ('Warning', self.lang['ui.action_start_4']), 'prepare table rows failed': ('Warning', self.lang['ui.action_start_6']), 'run error': ('Critical', self.lang['ui.action_start_7']) }.get(result) if message:
""" Provides the application's main functionality, including user interface initialization, database query execution, and data display and processing. The classes in this module are responsible for the application's main workflow, such as initializing the user interface, handling button actions, running background data queries, and displaying data. The main classes are `ActionStart` and `StartWork`, which handle user interface actions and perform background work, respectively. :author: assassing :contact: https://github.com/hxz393 :copyright: Copyright 2023, hxz393. All rights reserved. """ logger = logging.getLogger(__name__) class ActionStart(QObject): """ Handles user interface actions, such as initializing the interface and responding to button clicks. This class contains the interface's main action logic, such as handling clicks on the start button, updating the UI language, and filling the table with data. It cooperates with the background thread `StartWork` to query and display data. :param lang_manager: Language manager, used to load and update the interface language. :param config_manager: Configuration manager, provides the application's configuration. :param table: Main table widget, used to display data. :param filter_bar: Filter bar, used to filter data. :type lang_manager: LangManager :type config_manager: ConfigManager :type table: TableMain :type filter_bar: FilterBar """ status_updated = pyqtSignal(str) def __init__(self, lang_manager: LangManager, config_manager: ConfigManager, table: TableMain, filter_bar: FilterBar): super().__init__() # Instantiate components self.lang_manager = lang_manager self.lang_manager.lang_updated.connect(self.update_lang) self.config_manager = config_manager self.table = table self.filter_bar = filter_bar self.initUI() def initUI(self) -> None: """ Initialize the user interface. Create and configure the start action button, including its icon, shortcut, and trigger event. :rtype: None :return: No return value. """ self.action_start = QAction(QIcon(get_resource_path('media/icons8-start-26.png')), 'Start') self.action_start.setShortcut('F10') self.action_start.triggered.connect(self.start) self.update_lang() def update_lang(self) -> None: """ Update the interface language settings. :rtype: None :return: No return value. """ self.lang = self.lang_manager.get_lang() self.action_start.setText(self.lang['ui.action_start_1']) self.action_start.setStatusTip(self.lang['ui.action_start_2']) def start(self) -> None: """ Start the update action workflow. This method initializes and starts a background thread `StartWork` that performs the data queries and table updates. It also connects signals and slots for the UI updates. :rtype: None :return: No return value. """ try: # Initialize the child thread, passing in the language dict and configuration self.start_work = StartWork(self.lang, self.config_manager) # Connect signals and slots; these are all UI operations and must run in the main thread self.start_work.initialize_signal.connect(self.initialize) self.start_work.table_insert_signal.connect(self.table_insert) self.start_work.table_column_hide_signal.connect(self.table_column_hide) self.start_work.finalize_signal.connect(self.finalize) self.start_work.message.connect(self.show_result_message) # Start running self.start_work.start() except Exception: logger.exception('Failed to initiate start action.') self.status_updated.emit(self.lang['label_status_error']) def initialize(self) -> None: """ Initialize the UI and state; executed before the start action runs. This method sets the initial state of UI elements, e.g. disabling buttons and clearing the table. :rtype: None :return: No return value. """ logger.info('Start running') # Send a hint message to the status bar self.status_updated.emit(self.lang['ui.action_start_3']) # Make the start button unclickable self.action_start.setEnabled(False) # Disable table sorting self.table.setSortingEnabled(False) # Disable table updates self.table.setUpdatesEnabled(False) # Disable filter bar widgets self.filter_bar.filter_app_box.setEnabled(False) self.filter_bar.filter_table_box.setEnabled(False) self.filter_bar.filter_table_check_box.setEnabled(False) self.filter_bar.filter_value_box.setEnabled(False) self.filter_bar.filter_value_button.setEnabled(False) self.filter_bar.filter_reset_button.setEnabled(False) # Clear table data self.table.clear() # Initialize column widths self.table.set_header_resize() logger.debug('Initialization finished') def table_insert(self, table_rows: List[List[List[str]]]) -> None: """ Insert query results into the main table. This method takes query results as input, formats them, and inserts them into the application's main table. Each element is a triple-nested list representing one row of table data. :param table_rows: Table data to insert; each element represents one row. :type table_rows: List[List[List[str]]] :rtype: None :return: No return value. """ for row in table_rows: self.table.add_row(row) logger.debug('Table filling finished.') def table_column_hide(self, query_statuses: Dict[str, bool]) -> None: """ Decide whether to hide certain table columns based on the query statuses. :param query_statuses: Dict of query statuses; keys are environment names, values are booleans indicating whether the environment is enabled. :type query_statuses: Dict[str, bool] :rtype: None :return: No return value. """ # env_name looks like 'PRO_CONFIG', keys in COL_INFO look like 'pro_value', env_switch is a boolean. for env_name, env_switch in query_statuses.items(): # Map env_name to the corresponding key in COL_INFO column_name_mapping = {'PRO_CONFIG': 'pro_value', 'PRE_CONFIG': 'pre_value', 'TEST_CONFIG': 'test_value', 'DEV_CONFIG': 'dev_value'} # Get the column index col = COL_INFO[column_name_mapping[env_name]]['col'] # Show or hide the column according to the environment switch. self.table.showColumn(col) if env_switch else self.table.hideColumn(col) def finalize(self) -> None: """ Wrap-up work after the query completes, including re-enabling table sorting and updates. :rtype: None :return: No return value. """ # Apply colors and filters first self.table.apply_color_to_table() self.filter_bar.filter_table() # Enable sorting self.table.setSortingEnabled(True) # Sort by the first column in ascending order by default self.table.sortByColumn(0, Qt.AscendingOrder) # Allow the user to resize columns self.table.horizontalHeader().setSectionResizeMode(QHeaderView.Interactive) # Update the filters; insert values into the service filter self.filter_bar.filter_options_add() # Apply the filter self.filter_bar.highlight_rows.clear() self.filter_bar.filter_table() # Enable table updates self.table.setUpdatesEnabled(True) # Enable filter bar widgets self.filter_bar.filter_app_box.setEnabled(True) self.filter_bar.filter_table_box.setEnabled(True) self.filter_bar.filter_table_check_box.setEnabled(True) self.filter_bar.filter_value_box.setEnabled(True) self.filter_bar.filter_value_button.setEnabled(True) self.filter_bar.filter_reset_button.setEnabled(True) def show_result_message(self, result: str) -> None: """ Show the result message. Depending on the run result, show a different status message or error information. :param result: Description of the run result. :type result: str :rtype: None :return: No return value. """ self.action_start.setEnabled(True) if result == 'done': logger.info('Run Completed') else: message = { 'no query result': ('Warning', self.lang['ui.action_start_4']), 'prepare table rows failed': ('Warning', self.lang['ui.action_start_6']), 'run error': ('Critical', self.lang['ui.action_start_7']) }.get(result) if message:
message_show(*message)
6
2023-11-07 01:02:38+00:00
24k
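In the record above, `ActionStart.start` hands the heavy lifting to a `StartWork` thread and only touches widgets through queued signal/slot connections, so query execution never blocks or mutates the UI directly. A minimal sketch of that PyQt5 worker pattern; the class and signal names below are illustrative stand-ins, not the repo's API:

```python
import sys
from PyQt5.QtCore import QCoreApplication, QThread, pyqtSignal

class QueryWorker(QThread):
    """Background worker: emits signals so all UI mutations stay in the main thread."""
    initialize_signal = pyqtSignal()   # e.g. disable buttons, clear the table
    row_ready = pyqtSignal(list)       # one formatted table row at a time
    finalize_signal = pyqtSignal(str)  # 'done' or an error keyword

    def run(self):
        self.initialize_signal.emit()
        for row in ([['service', 'key', 'value']],):  # stand-in for query results
            self.row_ready.emit(row)
        self.finalize_signal.emit('done')

app = QCoreApplication(sys.argv)
worker = QueryWorker()
worker.row_ready.connect(print)                            # slot runs in the main thread
worker.finalize_signal.connect(lambda result: app.quit())  # re-enable UI, then quit
worker.start()
sys.exit(app.exec_())
```

Because the connections cross threads, Qt queues the slot invocations into the main event loop, which is exactly why `initialize`, `table_insert`, and `finalize` in the record are safe to run as ordinary widget code.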
google-research/semivl
model/builder.py
[ { "identifier": "TIMMVisionTransformer", "path": "model/backbone/timm_vit.py", "snippet": "class TIMMVisionTransformer(nn.Module):\n\n def __init__(\n self,\n variant,\n timm_load_pretrained,\n drop_path_rate,\n img_size,\n out_indices,\n ):\n super...
import types import torch from functools import reduce from mmcv.utils import Config from mmseg.models import ASPPHead, DepthwiseSeparableASPPHead, build_segmentor from mmseg.ops import resize from torch.nn import functional as F from model.backbone.timm_vit import TIMMVisionTransformer from model.decode_heads.dlv3p_head import DLV3PHead from model.decode_heads.vlg_head import VLGHead from model.vlm import VLM from third_party.maskclip.models.backbones.maskclip_vit import MaskClipVisionTransformer from third_party.maskclip.models.decode_heads.maskclip2_head import MaskClip2Head from third_party.maskclip.models.decode_heads.maskclip_head import MaskClipHead from third_party.unimatch.model.semseg.deeplabv3plus import DeepLabV3Plus from third_party.zegclip.losses.atm_loss import SegLossPlus from third_party.zegclip.models.backbones.clip_vit import CLIPVisionTransformer from third_party.zegclip.models.backbones.clip_vpt_vit import VPTCLIPVisionTransformer from third_party.zegclip.models.backbones.text_encoder import CLIPTextEncoder from third_party.zegclip.models.backbones.utils import DropPath from third_party.zegclip.models.decode_heads.atm_head import ATMSingleHeadSeg
19,928
# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def nested_set(dic, key, value): keys = key.split('.') for key in keys[:-1]: dic = dic.setdefault(key, {}) dic[keys[-1]] = value def nested_get(dictionary, keys, default=None): return reduce(lambda d, key: d.get(key, default) if isinstance(d, dict) else default, keys.split("."), dictionary) def is_vlm(obj): return isinstance(obj, VLM) def forward_wrapper(self, img, gt=None, need_fp=False, only_fp=False, forward_mode='default'): if forward_mode == 'maskclip_trust': return self.train_maskclip_trust(img, gt) elif forward_mode == 'default': x = self.extract_feat(img) if self.disable_dropout: dropout_modules = [module for module in self.modules() if isinstance(module, torch.nn.Dropout) or isinstance(module, DropPath)] for module in dropout_modules: module.eval() if only_fp: if is_vlm(self): feats = x[0][0] x[0][0] = [F.dropout2d(f, self.fp_rate) for f in feats] # perturb features from conv_encoder if len(x) == 3 and x[2] is not None: x[2] = [F.dropout2d(f, self.fp_rate) for f in x[2]] # also provide unperturbed features if hasattr(self.decode_head, 'dc_unperturbed') and self.decode_head.dc_unperturbed: assert len(x[0]) == 2 x[0].append(feats) else: x = [F.dropout2d(f, self.fp_rate) for f in x] elif need_fp: if is_vlm(self): feats = x[0][0] x[0][0] = [torch.cat((f, F.dropout2d(f, self.fp_rate))) for f in feats] x[0][1] = torch.cat((x[0][1], x[0][1])) # perturb features from conv_encoder if len(x) == 3 and x[2] is not None: x[2] = [torch.cat((f, F.dropout2d(f, self.fp_rate))) for f in x[2]] # also provide unperturbed features if hasattr(self.decode_head, 'dc_unperturbed') and self.decode_head.dc_unperturbed: assert len(x[0]) == 2 x[0].append([torch.cat((f, f)) for f in feats]) else: x = [torch.cat((f, F.dropout2d(f, self.fp_rate))) for f in x] out = self._decode_head_forward_test(x, img_metas=None) out = resize( input=out, size=img.shape[2:], mode='bilinear', align_corners=self.align_corners) if need_fp: out = out.chunk(2) return out else: raise ValueError(forward_mode) def build_model(cfg): model_type = cfg['model'] if model_type == 'deeplabv3plus':
# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def nested_set(dic, key, value): keys = key.split('.') for key in keys[:-1]: dic = dic.setdefault(key, {}) dic[keys[-1]] = value def nested_get(dictionary, keys, default=None): return reduce(lambda d, key: d.get(key, default) if isinstance(d, dict) else default, keys.split("."), dictionary) def is_vlm(obj): return isinstance(obj, VLM) def forward_wrapper(self, img, gt=None, need_fp=False, only_fp=False, forward_mode='default'): if forward_mode == 'maskclip_trust': return self.train_maskclip_trust(img, gt) elif forward_mode == 'default': x = self.extract_feat(img) if self.disable_dropout: dropout_modules = [module for module in self.modules() if isinstance(module, torch.nn.Dropout) or isinstance(module, DropPath)] for module in dropout_modules: module.eval() if only_fp: if is_vlm(self): feats = x[0][0] x[0][0] = [F.dropout2d(f, self.fp_rate) for f in feats] # perturb features from conv_encoder if len(x) == 3 and x[2] is not None: x[2] = [F.dropout2d(f, self.fp_rate) for f in x[2]] # also provide unperturbed features if hasattr(self.decode_head, 'dc_unperturbed') and self.decode_head.dc_unperturbed: assert len(x[0]) == 2 x[0].append(feats) else: x = [F.dropout2d(f, self.fp_rate) for f in x] elif need_fp: if is_vlm(self): feats = x[0][0] x[0][0] = [torch.cat((f, F.dropout2d(f, self.fp_rate))) for f in feats] x[0][1] = torch.cat((x[0][1], x[0][1])) # perturb features from conv_encoder if len(x) == 3 and x[2] is not None: x[2] = [torch.cat((f, F.dropout2d(f, self.fp_rate))) for f in x[2]] # also provide unperturbed features if hasattr(self.decode_head, 'dc_unperturbed') and self.decode_head.dc_unperturbed: assert len(x[0]) == 2 x[0].append([torch.cat((f, f)) for f in feats]) else: x = [torch.cat((f, F.dropout2d(f, self.fp_rate))) for f in x] out = self._decode_head_forward_test(x, img_metas=None) out = resize( input=out, size=img.shape[2:], mode='bilinear', align_corners=self.align_corners) if need_fp: out = out.chunk(2) return out else: raise ValueError(forward_mode) def build_model(cfg): model_type = cfg['model'] if model_type == 'deeplabv3plus':
model = DeepLabV3Plus(cfg)
7
2023-11-02 14:49:38+00:00
24k
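The `forward_wrapper` captured in this record implements UniMatch-style feature perturbation: the clean feature map is concatenated with a channel-dropout copy, both halves are decoded in a single pass, and the logits are split back apart with `chunk(2)`. Below is a minimal, self-contained sketch of that pattern, assuming a generic decode head; `TinyDecoder` and `forward_with_fp` are hypothetical names introduced here for illustration, not part of the record.

import torch
import torch.nn.functional as F
from torch import nn

class TinyDecoder(nn.Module):
    # Hypothetical stand-in for the segmentation decode head.
    def __init__(self, in_ch: int, num_classes: int):
        super().__init__()
        self.head = nn.Conv2d(in_ch, num_classes, kernel_size=1)

    def forward(self, feats: torch.Tensor) -> torch.Tensor:
        return self.head(feats)

def forward_with_fp(decoder: nn.Module, feats: torch.Tensor, fp_rate: float = 0.5):
    # Stack the clean features with a channel-dropout ("feature perturbation")
    # copy, decode both in one forward pass, then split the predictions apart.
    both = torch.cat((feats, F.dropout2d(feats, fp_rate)))
    out = decoder(both)
    return out.chunk(2)  # (clean logits, perturbed logits)

if __name__ == "__main__":
    decoder = TinyDecoder(in_ch=16, num_classes=4)
    feats = torch.randn(2, 16, 32, 32)
    out_clean, out_fp = forward_with_fp(decoder, feats)
    print(out_clean.shape, out_fp.shape)  # torch.Size([2, 4, 32, 32]) twice

The single concatenated pass mirrors what the record does for `need_fp`; the `only_fp` branch simply omits the clean copy.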
codefuse-ai/Collinear-Constrained-Attention
model/build_model.py
[ { "identifier": "get_model_params_num", "path": "utils/common_utils.py", "snippet": "def get_model_params_num(model):\n \"\"\"\n Get params number of the model\n Args:\n model: model(required)\n Returns:\n the number of parameters of model\n \"\"\"\n num = 0\n for _, p...
import os import torch import sys import peft import model.peft.modeling_peft # noqa import bitsandbytes as bnb # noqa import accelerate # noqa from utils.common_utils import get_model_params_num from transformers import ( # noqa: E402 CONFIG_MAPPING, AutoConfig, AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast ) from .gpt_neox.configuration_gpt_neox import GPTNeoXConfig from .gpt_neox.modeling_gpt_neox import GPTNeoXForCausalLM from .gpt_neox.tokenization_gpt_neox_fast import GPTNeoXTokenizerFast from .llama.configuration_llama import LlamaConfig from .llama.modeling_llama import LlamaForCausalLM from .llama.tokenization_llama import LlamaTokenizer from .llama.tokenization_llama_fast import LlamaTokenizerFast from torch.distributed.fsdp import ( FullyShardedDataParallel as FSDP, StateDictType, ) from utils.common_utils import print_rank_0, is_old_version from tokenizer import build_tokenizer from tokenizer.tokenizer import HFTokenizer from peft.tuners.lora import LoraLayer from model.peft.utils import prepare_model_for_kbit_training from peft import ( # noqa LoraConfig, PrefixTuningConfig, PromptEncoderConfig, PromptEncoderReparameterizationType, PromptTuningConfig, PromptTuningInit, TaskType, get_peft_model ) from model.peft.tuner import AdaLoraConfig from transformers import BitsAndBytesConfig from packaging import version from .glm.tokenization_glm_deprecated import GLMChineseTokenizer
17,414
# coding=utf-8 # Copyright (c) 2023 Ant Group. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. sys.path.append("..") # from .glm.modeling_glm import GLMForConditionalGeneration # from .glm.configuration_glm import GLMConfig # from .glm.tokenization_glm import GLMTokenizer try: except ImportError: BitsAndBytesConfig = None try: except ImportError: bnb = None def find_all_linear_names(args, model): cls = bnb.nn.Linear4bit if args.bits == 4 else (bnb.nn.Linear8bitLt if args.bits == 8 else torch.nn.Linear) lora_module_names = set() for name, module in model.named_modules(): if isinstance(module, cls): names = name.split('.') lora_module_names.add(names[0] if len(names) == 1 else names[-1]) if 'lm_head' in lora_module_names: # needed for 16-bit lora_module_names.remove('lm_head') return list(lora_module_names) def setup_model(args, logger, use_cache=False): # Load pretrained model and tokenizer if args.pretrained_model_path: # TODO: implement reading the tokenizer from pretrained if args.model_type == 'gpt_neox': # if args.tokenizer_type: # tokenizer = build_tokenizer(args) # tokenizer.eod_token = "<|endoftext|>" # tokenizer.pad_token = "<|pad|>" # # tokenizer.sop_token = "<|endoftext|>" # adapt to multi task dataset # # tokenizer.eop_token = "<|endoftext|>" # tokenizer.eod_id = tokenizer.tokenize(tokenizer.eod_token)[0] # tokenizer.pad_id = tokenizer.tokenize(tokenizer.pad_token)[0] # else: tokenizer = GPTNeoXTokenizerFast.from_pretrained(args.pretrained_model_path) # tokenizer = PreTrainedTokenizerFast(tokenizer_file=args.vocab_file) tokenizer.eod_token = "<|endoftext|>" tokenizer.pad_token = "<|pad|>" tokenizer.sop_token = "<|endoftext|>" # adapt to multi task dataset tokenizer.eop_token = "<|endoftext|>" tokenizer.eod_id = tokenizer.convert_tokens_to_ids(tokenizer.eod_token) tokenizer.pad_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token) print_rank_0(f'tokenizer {tokenizer.eod_token} id: {tokenizer.eod_id}') print_rank_0(f'tokenizer {tokenizer.pad_token} id: {tokenizer.pad_id}') elif args.model_type == 'llama': tokenizer = LlamaTokenizerFast.from_pretrained(args.pretrained_model_path) # tokenizer = AutoTokenizer.from_pretrained( # args.pretrained_model_path, # trust_remote_code=True, # ) tokenizer.eod_token = "</s>" tokenizer.eos_token = "</s>" tokenizer.bos_token = "<s>" tokenizer.pad_token = "[PAD]" tokenizer.unk_token = "<unk>" tokenizer.sop_token = "</s>" # adapt to multi task dataset tokenizer.eop_token = "</s>" tokenizer.eod_id = tokenizer.convert_tokens_to_ids(tokenizer.eod_token) tokenizer.eos_id = tokenizer.convert_tokens_to_ids(tokenizer.eos_token) tokenizer.bos_id = tokenizer.convert_tokens_to_ids(tokenizer.bos_token) tokenizer.pad_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token) tokenizer.unk_id = tokenizer.convert_tokens_to_ids(tokenizer.unk_token) print_rank_0(f'tokenizer {tokenizer.eod_token} id: {tokenizer.eod_id}') print_rank_0(f'tokenizer {tokenizer.eos_token} id: {tokenizer.eos_id}') print_rank_0(f'tokenizer {tokenizer.bos_token} id: {tokenizer.bos_id}') print_rank_0(f'tokenizer {tokenizer.pad_token} id: {tokenizer.pad_id}') print_rank_0(f'tokenizer {tokenizer.unk_token} id: {tokenizer.unk_id}') elif args.model_type == 'glm': if is_old_version(args.pretrained_model_path): tokenizer = GLMChineseTokenizer.from_pretrained(args.pretrained_model_path) else: tokenizer = GLMTokenizer.from_pretrained(args.pretrained_model_path) elif args.train_mode == 'sst': # tokenizer = build_tokenizer(args) tokenizer = PreTrainedTokenizerFast(tokenizer_file=args.vocab_file) tokenizer.eod_token = "<|endoftext|>" tokenizer.pad_token = "<|pad|>" tokenizer.sop_token = "<|endoftext|>" # adapt to multi task dataset tokenizer.eop_token = "<|endoftext|>" tokenizer.eod_id = tokenizer.convert_tokens_to_ids(tokenizer.eod_token) tokenizer.pad_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token) print_rank_0(f'tokenizer {tokenizer.eod_token} id: {tokenizer.eod_id}') print_rank_0(f'tokenizer {tokenizer.pad_token} id: {tokenizer.pad_id}') else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_path." ) if args.model_type == 'gpt_neox':
# coding=utf-8 # Copyright (c) 2023 Ant Group. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. sys.path.append("..") # from .glm.modeling_glm import GLMForConditionalGeneration # from .glm.configuration_glm import GLMConfig # from .glm.tokenization_glm import GLMTokenizer try: except ImportError: BitsAndBytesConfig = None try: except ImportError: bnb = None def find_all_linear_names(args, model): cls = bnb.nn.Linear4bit if args.bits == 4 else (bnb.nn.Linear8bitLt if args.bits == 8 else torch.nn.Linear) lora_module_names = set() for name, module in model.named_modules(): if isinstance(module, cls): names = name.split('.') lora_module_names.add(names[0] if len(names) == 1 else names[-1]) if 'lm_head' in lora_module_names: # needed for 16-bit lora_module_names.remove('lm_head') return list(lora_module_names) def setup_model(args, logger, use_cache=False): # Load pretrained model and tokenizer if args.pretrained_model_path: # TODO: implement reading the tokenizer from pretrained if args.model_type == 'gpt_neox': # if args.tokenizer_type: # tokenizer = build_tokenizer(args) # tokenizer.eod_token = "<|endoftext|>" # tokenizer.pad_token = "<|pad|>" # # tokenizer.sop_token = "<|endoftext|>" # adapt to multi task dataset # # tokenizer.eop_token = "<|endoftext|>" # tokenizer.eod_id = tokenizer.tokenize(tokenizer.eod_token)[0] # tokenizer.pad_id = tokenizer.tokenize(tokenizer.pad_token)[0] # else: tokenizer = GPTNeoXTokenizerFast.from_pretrained(args.pretrained_model_path) # tokenizer = PreTrainedTokenizerFast(tokenizer_file=args.vocab_file) tokenizer.eod_token = "<|endoftext|>" tokenizer.pad_token = "<|pad|>" tokenizer.sop_token = "<|endoftext|>" # adapt to multi task dataset tokenizer.eop_token = "<|endoftext|>" tokenizer.eod_id = tokenizer.convert_tokens_to_ids(tokenizer.eod_token) tokenizer.pad_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token) print_rank_0(f'tokenizer {tokenizer.eod_token} id: {tokenizer.eod_id}') print_rank_0(f'tokenizer {tokenizer.pad_token} id: {tokenizer.pad_id}') elif args.model_type == 'llama': tokenizer = LlamaTokenizerFast.from_pretrained(args.pretrained_model_path) # tokenizer = AutoTokenizer.from_pretrained( # args.pretrained_model_path, # trust_remote_code=True, # ) tokenizer.eod_token = "</s>" tokenizer.eos_token = "</s>" tokenizer.bos_token = "<s>" tokenizer.pad_token = "[PAD]" tokenizer.unk_token = "<unk>" tokenizer.sop_token = "</s>" # adapt to multi task dataset tokenizer.eop_token = "</s>" tokenizer.eod_id = tokenizer.convert_tokens_to_ids(tokenizer.eod_token) tokenizer.eos_id = tokenizer.convert_tokens_to_ids(tokenizer.eos_token) tokenizer.bos_id = tokenizer.convert_tokens_to_ids(tokenizer.bos_token) tokenizer.pad_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token) tokenizer.unk_id = tokenizer.convert_tokens_to_ids(tokenizer.unk_token) print_rank_0(f'tokenizer {tokenizer.eod_token} id: {tokenizer.eod_id}') print_rank_0(f'tokenizer {tokenizer.eos_token} id: {tokenizer.eos_id}') print_rank_0(f'tokenizer {tokenizer.bos_token} id: {tokenizer.bos_id}') print_rank_0(f'tokenizer {tokenizer.pad_token} id: {tokenizer.pad_id}') print_rank_0(f'tokenizer {tokenizer.unk_token} id: {tokenizer.unk_id}') elif args.model_type == 'glm': if is_old_version(args.pretrained_model_path): tokenizer = GLMChineseTokenizer.from_pretrained(args.pretrained_model_path) else: tokenizer = GLMTokenizer.from_pretrained(args.pretrained_model_path) elif args.train_mode == 'sst': # tokenizer = build_tokenizer(args) tokenizer = PreTrainedTokenizerFast(tokenizer_file=args.vocab_file) tokenizer.eod_token = "<|endoftext|>" tokenizer.pad_token = "<|pad|>" tokenizer.sop_token = "<|endoftext|>" # adapt to multi task dataset tokenizer.eop_token = "<|endoftext|>" tokenizer.eod_id = tokenizer.convert_tokens_to_ids(tokenizer.eod_token) tokenizer.pad_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token) print_rank_0(f'tokenizer {tokenizer.eod_token} id: {tokenizer.eod_id}') print_rank_0(f'tokenizer {tokenizer.pad_token} id: {tokenizer.pad_id}') else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_path." ) if args.model_type == 'gpt_neox':
auto_config = GPTNeoXConfig
1
2023-11-02 01:37:01+00:00
24k
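`find_all_linear_names` in this record walks `model.named_modules()` to collect candidate LoRA target modules and drops `lm_head`, which is typically kept in full precision. Here is a minimal sketch of the same pattern, restricted to plain `torch.nn.Linear` so it runs without bitsandbytes; `ToyLM` and `find_linear_names` are illustration-only names.

import torch
from torch import nn

def find_linear_names(model: nn.Module) -> list:
    names = set()
    for name, module in model.named_modules():
        if isinstance(module, nn.Linear):
            parts = name.split(".")
            # Keep only the leaf attribute name, e.g. "q_proj" from "layers.0.attn.q_proj".
            names.add(parts[0] if len(parts) == 1 else parts[-1])
    names.discard("lm_head")  # leave the output head out of the LoRA targets
    return sorted(names)

class ToyLM(nn.Module):
    # Hypothetical toy model with attention-style projections and an LM head.
    def __init__(self):
        super().__init__()
        self.q_proj = nn.Linear(8, 8)
        self.v_proj = nn.Linear(8, 8)
        self.lm_head = nn.Linear(8, 100)

if __name__ == "__main__":
    print(find_linear_names(ToyLM()))  # ['q_proj', 'v_proj']

The returned list is what would be passed as `target_modules` to a `LoraConfig` in the record's PEFT setup.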
bytedance/cryostar
projects/star/train_atom.py
[ { "identifier": "SpatialGridTranslate", "path": "cryostar/utils/transforms.py", "snippet": "class SpatialGridTranslate(torch.nn.Module):\n\n def __init__(self, D, device=None) -> None:\n super().__init__()\n self.D = D\n # yapf: disable\n coords = torch.stack(torch.meshgri...
import os.path as osp import warnings import collections import einops import numpy as np import biotite.structure as struc import torch import lightning.pytorch as pl from pathlib import Path from copy import deepcopy from torch import nn from torch import optim from torch.utils.data import DataLoader from torchinfo import summary from lightning.fabric.utilities.warnings import PossibleUserWarning from lightning.pytorch.utilities import rank_zero_only from lightning.pytorch.strategies import DDPStrategy from mmengine import mkdir_or_exist from cryostar.utils.transforms import SpatialGridTranslate from cryostar.utils.dataio import StarfileDataSet, StarfileDatasetConfig, Mask from cryostar.utils.ctf_utils import CTFRelion, CTFCryoDRGN from cryostar.utils.losses import calc_cor_loss, calc_kl_loss from cryostar.utils.misc import log_to_current, \ pl_init_exp, pretty_dict, set_seed, warmup from cryostar.utils.pdb_tools import bt_save_pdb from cryostar.gmm.gmm import EMAN2Grid, batch_projection, Gaussian from cryostar.gmm.deformer import E3Deformer, NMADeformer from cryostar.utils.fft_utils import primal_to_fourier_2d, fourier_to_primal_2d from cryostar.utils.polymer import Polymer, NT_ATOMS, AA_ATOMS from cryostar.utils.dist_loss import (find_quaint_cutoff_pairs, find_range_cutoff_pairs, find_continuous_pairs, calc_dist_by_pair_indices, remove_duplicate_pairs, filter_same_chain_pairs, DistLoss) from cryostar.utils.latent_space_utils import get_nearest_point, cluster_kmeans, run_pca, get_pc_traj, run_umap from cryostar.utils.vis_utils import plot_z_dist, save_tensor_image from cryostar.utils.pl_utils import merge_step_outputs, squeeze_dict_outputs_1st_dim, \ filter_outputs_by_indices, get_1st_unique_indices from miscs import calc_pair_dist_loss, calc_clash_loss, low_pass_mask2d, VAE, infer_ctf_params_from_config
14,716
# other # avoid num_workers set as cpu_count warning warnings.simplefilter("ignore", PossibleUserWarning) # only log to rank_zero, comment this for debugging log_to_current = rank_zero_only(log_to_current) TASK_NAME = "atom" def prepare_images(images: torch.FloatTensor, space: str): assert space in ("real", "fourier") if space == "real": model_input = einops.rearrange(images, "b 1 ny nx -> b (1 ny nx)") else: fimages = primal_to_fourier_2d(images) model_input = einops.rearrange(torch.view_as_real(fimages), "b 1 ny nx c2 -> b (1 ny nx c2)", c2=2) return model_input class InitTask(pl.LightningModule): def __init__(self, em_module): super().__init__() self.cfg = em_module.cfg self.em_module = em_module self.loss_deque = collections.deque([ 10, ], maxlen=20) def on_train_batch_end(self, outputs, batch, batch_idx): self.loss_deque.append(outputs['loss'].item()) if np.mean(self.loss_deque) < 1e-3: self.trainer.should_stop = True # update all process status self.trainer.should_stop = self.trainer.strategy.broadcast(self.trainer.should_stop) def training_step(self, batch, batch_idx): images = batch["proj"] idxes = batch["idx"] rot_mats, trans_mats = self.em_module.get_batch_pose(batch) pred_deformation, mu, log_var = self.em_module.model(prepare_images(images, self.cfg.model.input_space), idxes, rot_mats) shift_loss = torch.mean(torch.pow(pred_deformation.flatten(start_dim=-2), 2)) loss = shift_loss if self.global_step % self.cfg.runner.log_every_n_step == 0: log_to_current(f"loss {loss.item()}") return loss def configure_optimizers(self): return optim.AdamW(self.em_module.model.parameters(), lr=1e-4) def on_fit_end(self): log_to_current(f"Init finished with loss {np.mean(self.loss_deque)}") class CryoEMTask(pl.LightningModule): def __init__(self, cfg, dataset): super().__init__() cfg = deepcopy(cfg) self.cfg = cfg # Define GMM meta = Polymer.from_pdb(cfg.dataset_attr.ref_pdb_path) log_to_current(f"Load reference structure from {cfg.dataset_attr.ref_pdb_path}") # for save self.template_pdb = meta.to_atom_arr() log_to_current(f"Protein contains {len(meta)} atoms, " f"{meta.num_amino_acids} amino acids, " f"{meta.num_nucleotides} nucleotides, " f"{meta.num_chains} chains.") # ref ref_centers = torch.from_numpy(meta.coord).float() ref_amps = torch.from_numpy(meta.num_electron).float() ref_sigmas = torch.ones_like(ref_amps) ref_sigmas.fill_(2.) 
log_to_current(f"1st GMM blob amplitude {ref_amps[0].item()}, sigma {ref_sigmas[0].item()}") num_pts = len(meta) log_to_current(f"Reference structure has {num_pts} atom coordinates") # tunable params # gmm self.register_buffer("gmm_centers", ref_centers) if cfg.gmm.tunable: log_to_current("Set GMM sigmas, amplitudes tunable") self.register_parameter("gmm_sigmas", nn.Parameter(ref_sigmas)) self.register_parameter("gmm_amps", nn.Parameter(ref_amps)) else: self.register_buffer("gmm_sigmas", ref_sigmas) self.register_buffer("gmm_amps", ref_amps) nma_modes = None if (hasattr(self.cfg.extra_input_data_attr, "nma_path") and self.cfg.extra_input_data_attr.nma_path not in ["", None]): nma_modes = torch.tensor(np.load(self.cfg.extra_input_data_attr.nma_path), dtype=torch.float32) log_to_current(f"Load NMA coefficients from {self.cfg.extra_input_data_attr.nma_path}, " f"whose shape is {nma_modes.shape}") # model if cfg.model.input_space == "fourier": in_dim = 2 * cfg.data_process.down_side_shape ** 2 elif cfg.model.input_space == "real": in_dim = cfg.data_process.down_side_shape ** 2 else: raise NotImplementedError self.model = VAE(in_dim=in_dim, out_dim=num_pts * 3 if nma_modes is None else 6 + nma_modes.shape[1], **cfg.model.model_cfg) log_to_current('Model summary:\n' + str(summary(self.model, input_size=[(1, in_dim), (1,)], verbose=0))) if nma_modes is None:
# other # avoid num_workers set as cpu_count warning warnings.simplefilter("ignore", PossibleUserWarning) # only log to rank_zero, comment this for debugging log_to_current = rank_zero_only(log_to_current) TASK_NAME = "atom" def prepare_images(images: torch.FloatTensor, space: str): assert space in ("real", "fourier") if space == "real": model_input = einops.rearrange(images, "b 1 ny nx -> b (1 ny nx)") else: fimages = primal_to_fourier_2d(images) model_input = einops.rearrange(torch.view_as_real(fimages), "b 1 ny nx c2 -> b (1 ny nx c2)", c2=2) return model_input class InitTask(pl.LightningModule): def __init__(self, em_module): super().__init__() self.cfg = em_module.cfg self.em_module = em_module self.loss_deque = collections.deque([ 10, ], maxlen=20) def on_train_batch_end(self, outputs, batch, batch_idx): self.loss_deque.append(outputs['loss'].item()) if np.mean(self.loss_deque) < 1e-3: self.trainer.should_stop = True # update all process status self.trainer.should_stop = self.trainer.strategy.broadcast(self.trainer.should_stop) def training_step(self, batch, batch_idx): images = batch["proj"] idxes = batch["idx"] rot_mats, trans_mats = self.em_module.get_batch_pose(batch) pred_deformation, mu, log_var = self.em_module.model(prepare_images(images, self.cfg.model.input_space), idxes, rot_mats) shift_loss = torch.mean(torch.pow(pred_deformation.flatten(start_dim=-2), 2)) loss = shift_loss if self.global_step % self.cfg.runner.log_every_n_step == 0: log_to_current(f"loss {loss.item()}") return loss def configure_optimizers(self): return optim.AdamW(self.em_module.model.parameters(), lr=1e-4) def on_fit_end(self): log_to_current(f"Init finished with loss {np.mean(self.loss_deque)}") class CryoEMTask(pl.LightningModule): def __init__(self, cfg, dataset): super().__init__() cfg = deepcopy(cfg) self.cfg = cfg # Define GMM meta = Polymer.from_pdb(cfg.dataset_attr.ref_pdb_path) log_to_current(f"Load reference structure from {cfg.dataset_attr.ref_pdb_path}") # for save self.template_pdb = meta.to_atom_arr() log_to_current(f"Protein contains {len(meta)} atoms, " f"{meta.num_amino_acids} amino acids, " f"{meta.num_nucleotides} nucleotides, " f"{meta.num_chains} chains.") # ref ref_centers = torch.from_numpy(meta.coord).float() ref_amps = torch.from_numpy(meta.num_electron).float() ref_sigmas = torch.ones_like(ref_amps) ref_sigmas.fill_(2.) 
log_to_current(f"1st GMM blob amplitude {ref_amps[0].item()}, sigma {ref_sigmas[0].item()}") num_pts = len(meta) log_to_current(f"Reference structure has {num_pts} atom coordinates") # tunable params # gmm self.register_buffer("gmm_centers", ref_centers) if cfg.gmm.tunable: log_to_current("Set GMM sigmas, amplitudes tunable") self.register_parameter("gmm_sigmas", nn.Parameter(ref_sigmas)) self.register_parameter("gmm_amps", nn.Parameter(ref_amps)) else: self.register_buffer("gmm_sigmas", ref_sigmas) self.register_buffer("gmm_amps", ref_amps) nma_modes = None if (hasattr(self.cfg.extra_input_data_attr, "nma_path") and self.cfg.extra_input_data_attr.nma_path not in ["", None]): nma_modes = torch.tensor(np.load(self.cfg.extra_input_data_attr.nma_path), dtype=torch.float32) log_to_current(f"Load NMA coefficients from {self.cfg.extra_input_data_attr.nma_path}, " f"whose shape is {nma_modes.shape}") # model if cfg.model.input_space == "fourier": in_dim = 2 * cfg.data_process.down_side_shape ** 2 elif cfg.model.input_space == "real": in_dim = cfg.data_process.down_side_shape ** 2 else: raise NotImplementedError self.model = VAE(in_dim=in_dim, out_dim=num_pts * 3 if nma_modes is None else 6 + nma_modes.shape[1], **cfg.model.model_cfg) log_to_current('Model summary:\n' + str(summary(self.model, input_size=[(1, in_dim), (1,)], verbose=0))) if nma_modes is None:
self.deformer = E3Deformer()
13
2023-11-06 07:15:26+00:00
24k
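`InitTask.on_train_batch_end` in this record stops the warm-up phase once the mean of the last 20 losses drops below 1e-3, tracked in a bounded `collections.deque`. A framework-free sketch of that moving-average stopping rule follows; the class name and thresholds are illustrative.

import collections

class MovingAverageStopper:
    def __init__(self, window: int = 20, threshold: float = 1e-3):
        # Seed with a large value, as the record does, so training never
        # stops before the window has seen real losses.
        self.losses = collections.deque([10.0], maxlen=window)
        self.threshold = threshold

    def update(self, loss: float) -> bool:
        """Record one loss; return True when training should stop."""
        self.losses.append(loss)
        return sum(self.losses) / len(self.losses) < self.threshold

if __name__ == "__main__":
    stopper = MovingAverageStopper(window=5)
    for step, loss in enumerate([0.1, 0.01, 1e-4, 1e-4, 1e-4, 1e-4, 1e-4]):
        if stopper.update(loss):
            print(f"stop at step {step}")
            break

In the record the resulting flag is additionally synchronized via `self.trainer.strategy.broadcast` so every DDP rank stops together.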
KAIST-AILab/palr
train.py
[ { "identifier": "BC", "path": "imitation/bc.py", "snippet": "class BC(nn.Module):\n def __init__(self, policy, env, best_policy=None,\n replay_buffer=None, replay_buffer_valid=None, seed=0, \n device='cpu', lr=3e-4, envname=None, wandb=None, save_policy_path=None, \n ...
import os import wandb import envs import d4rl import gym import torch from imitation.bc import BC from imitation.rap import RAP from imitation.fca import FCA from imitation.mine import MINE_BC from imitation.palr import PALR from argparse import ArgumentParser from itertools import product from core.policy import TanhGaussianPolicyWithEmbedding, TanhGaussianRAPPolicy from core.replay_buffer import EnvReplayBuffer from core.preprocess import preprocess_dataset_with_prev_actions, data_select_num_transitions from rlkit.envs.wrappers import NormalizedBoxEnv
18,875
wandb_dir = '.' os.environ['WANDB_DIR'] = wandb_dir os.environ['D4RL_DATASET_DIR'] = './dataset/' def train(configs):
wandb_dir = '.' os.environ['WANDB_DIR'] = wandb_dir os.environ['D4RL_DATASET_DIR'] = './dataset/' def train(configs):
env = NormalizedBoxEnv(gym.make(configs['envname']))
10
2023-11-06 08:35:34+00:00
24k
tylerlight071/Project-Cipher
main.py
[ { "identifier": "clear_terminal", "path": "components/common_functions.py", "snippet": "def clear_terminal():\n os.system('cls' if os.name == 'nt' else 'clear')" }, { "identifier": "print_slow", "path": "components/common_functions.py", "snippet": "def print_slow(text, delay=0.00): #...
import msvcrt import os import pickle import sys import time import colorama import pygame from colorama import Fore, Style from components.common_functions import clear_terminal, print_slow, shop_help, help_user, connect_help, mail_help, \ system_help from conversations.calls import intro_call, first_call, second_call, third_call, fourth_call, fifth_call, sixth_call, \ markus_seen_call from conversations.minigame_calls import code_shatter_call from minigames.code_shatter_minigame import code_shatter_minigame from minigames.eye_spy_minigame import port_scanning from systems.level_1.amy.amy_system import AmySystem from systems.level_1.billy.billy_system import BillySystem from systems.level_1.cameras.camera_1 import camera_first from systems.level_1.markus.markus_system import MarkusSystem
15,883
print_slow(Fore.LIGHTBLUE_EX + f"\n{email['subject']} - From: {email['sender']}" + Style.RESET_ALL) def read_email(emails, subject): global has_read_email, evidence global balance email_found = False for email in emails: if email['subject'].lower() == subject.lower(): email_found = True print_slow( Fore.LIGHTBLUE_EX + f"\nFrom: {email['sender']}\nSubject: {email['subject']}\n\n{email['body']}" + Style.RESET_ALL) # Check if the email is one of the specific emails that increases evidence count if email['subject'].lower() in ["project update"]: evidence_item = 3 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) third_call() if email['subject'].lower() in ["professional development"]: evidence_item = 2 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) second_call() if email['subject'].lower() == "can't stop thinking about you" and email['sender'].lower() == 'amy': evidence_item = 1 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) first_call() if email['subject'].lower() == "upcoming software update" and email['sender'].lower() == 'markus': evidence_item = 6 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) sixth_call() # Add money to balance based on the email subject if email['subject'].lower() == "professional development": balance += 30 elif email['subject'].lower() == "project update": balance += 50 elif email['subject'].lower() == "can't stop thinking about you": balance += 20 elif email['subject'].lower() == "upcoming software update": balance += 50 if not email_found: print_slow(Fore.RED + "\nNo email found with that subject, please try again." + Style.RESET_ALL) def connect(): if has_item("EnigmaLink"): print_slow("") print_slow(Fore.GREEN + "Connecting to Enigma Corps network using EnigmaLink..." 
+ Style.RESET_ALL) time.sleep(0.5) print_slow("") print_slow(Fore.GREEN + "Establishing connection...") time.sleep(1) print_slow("") print_slow(Fore.GREEN + "Linking EnigmaLink to remote server...") time.sleep(2) print_slow("") print_slow(Fore.GREEN + "Decrypting server security protocols...") time.sleep(3) print_slow("") print_slow(Fore.GREEN + "Bypassing firewall...") time.sleep(2) print_slow("") print_slow(Fore.GREEN + "Connection established!") time.sleep(2) print_slow("") print_slow(Fore.GREEN + "You are now connected to Enigma Corps network.") print_slow("") # Network command loop while True: command = input(Fore.GREEN + "> " + Style.RESET_ALL) # Scan the network for systems and vulnerabilities if command.lower() == "scan": scan() # Hack into a system or vulnerability elif command.lower().startswith("hack "): target = command[5:] hack(target) # Display connect help message elif command.lower() == "help":
# Set the PYGAME_HIDE_SUPPORT_PROMPT environment variable os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "1" # Initialize pygame mixer pygame.mixer.init() # Load the bg music file and loop it pygame.mixer.music.load('bg_music.mp3') pygame.mixer.music.play(-1) # sets the volume to 20% (change value to adjust) pygame.mixer.music.set_volume(0.2) # Define the global variables at the module level inventory = [] balance = 300 emails = [] has_read_email = False has_read_file = False has_intro_call = False seen_markus = False evidence = [] amy_system = AmySystem() billy_system = BillySystem() markus_system = MarkusSystem() bg_music_enabled = True player_level = 1 has_started_game = False # Save the game state to a file def save_game(): global inventory, balance, emails, has_read_email, evidence, player_level, has_intro_call, has_started_game, seen_markus with open('savegame.pkl', 'wb') as f: pickle.dump( (inventory, balance, emails, has_read_email, evidence, player_level, has_intro_call, has_started_game, seen_markus), f) # Load the game state from a file def load_game(): global inventory, balance, emails, has_read_email, evidence, player_level, has_intro_call, has_started_game, seen_markus if os.path.exists('savegame.pkl'): with open('savegame.pkl', 'rb') as f: inventory, balance, emails, has_read_email, evidence, player_level, has_intro_call, has_started_game, seen_markus = pickle.load( f) else: # If the savegame file doesn't exist, set the default values inventory = [] player_level = 1 evidence = [] has_intro_call = False has_started_game = False seen_markus = False balance = 30000 emails = [ { "sender": "Hacker's Digest", "subject": "Weekly Hacker's Digest", "body": ( "Issue #143\n\n" "Cipher,\n\n" "Welcome to the latest edition of Hacker's Digest! In this issue: \n\n" "- Unveiling the Latest Exploits\n" "- Spotlight on Cryptocurrency Security\n" "- Interview with a Grey Hat Hacker\n" "- Tool of the Week: EnigmaLink\n\n" "Don't miss out on the latest in the world of hacking and cybersecurity. Stay informed and stay secure!\n\n" "Best regards,\n" "Hacker's Digest Team" ) }, { "sender": "The Cyber Mythbuster", "subject": "Busting Cybersecurity Myths", "body": ( "Cipher,\n\n" "Heard any wild cybersecurity myths lately? This week, we're busting the craziest ones, including:\n\n" "- Using 'Password123' for Maximum Security\n" "- Cyber Ninjas and Their Stealthy VPNs\n" "- USB Drives: The Fountain of Eternal Data\n\n" "Stay myth-free and keep on hacking (responsibly)!\n\n" "Mythbustingly,\n" "The Cyber Mythbuster" ) }, { "sender": "CyberSilliness", "subject": "Where Cyber Meets Comedy", "body": ( "Welcome to the CyberSilliness Gazette\n" "Where we believe that a good laugh is the ultimate antivirus! In this week's hilarity-packed issue:\n\n" "- Cyber Jokes to Crack You Up (Without Cracking Your Passwords)\n" "- Tech Support Horror Stories: A Comedy of Errors\n" "- Chuckle Challenge: Share Your Funniest Cybersecurity Anecdote\n" "- Meet the Cyber Clowns: Our Team's Silly Security Habits Revealed\n\n" "Laughter is contagious, and so is good cybersecurity. Dive into the giggles and stay safe!\n\n" "Silly Regards,\n" "The CyberSilliness Team" ) }, { "sender": "Security Insight Weekly", "subject": "Navigating the Cybersecurity Landscape", "body": ( "Hello Cipher,\n\n" "Welcome to Security Insight Weekly, your reliable source for navigating the ever-evolving cybersecurity landscape. 
In this week's issue:\n\n" "- Threat Analysis: Understanding Recent Cybersecurity Incidents\n" "- Best Practices for Endpoint Security\n" "- Industry Spotlight: Healthcare Cybersecurity Challenges\n" "- Security Compliance Update: Staying Aligned with Regulations\n\n" "Stay informed and empowered as we delve into the serious aspects of cybersecurity. Your security is our priority.\n\n" "Best regards,\n" "The Security Insight Team" ) }, ] # New function for game settings def game_settings(): global bg_music_enabled print_slow(Fore.GREEN + "░██████╗███████╗████████╗████████╗██╗███╗░░██╗░██████╗░░██████╗") print_slow(Fore.GREEN + "██╔════╝██╔════╝╚══██╔══╝╚══██╔══╝██║████╗░██║██╔════╝░██╔════╝") print_slow(Fore.GREEN + "╚█████╗░█████╗░░░░░██║░░░░░░██║░░░██║██╔██╗██║██║░░██╗░╚█████╗░") print_slow(Fore.GREEN + "░╚═══██╗██╔══╝░░░░░██║░░░░░░██║░░░██║██║╚████║██║░░╚██╗░╚═══██╗") print_slow(Fore.GREEN + "██████╔╝███████╗░░░██║░░░░░░██║░░░██║██║░╚███║╚██████╔╝██████╔╝") print_slow(Fore.GREEN + "╚═════╝░╚══════╝░░░╚═╝░░░░░░╚═╝░░░╚═╝╚═╝░░╚══╝░╚═════╝░╚═════╝░" + Style.RESET_ALL) print_slow("") print_slow("") print_slow("") print_slow(Fore.GREEN + " --------------------------------------------" + Style.RESET_ALL) print_slow( Fore.GREEN + f"| [Background Music] {'Enabled |' if bg_music_enabled else 'Disabled |'}" + Style.RESET_ALL) print_slow(Fore.GREEN + "| |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| [Delete Savegame] |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| [Back to Main Menu] |" + Style.RESET_ALL) print_slow(Fore.GREEN + " --------------------------------------------" + Style.RESET_ALL) choice = input(Fore.GREEN + "\n> " + Style.RESET_ALL) if choice.lower() == "background music": # Toggle background music bg_music_enabled = not bg_music_enabled if bg_music_enabled: pygame.mixer.music.play(-1) print_slow(Fore.GREEN + "\nBackground Music Enabled" + Style.RESET_ALL) time.sleep(1) clear_terminal() game_settings() else: pygame.mixer.music.stop() print_slow(Fore.RED + "\nBackground Music Disabled" + Style.RESET_ALL) time.sleep(1) clear_terminal() game_settings() elif choice.lower() == "delete savegame": # Delete savegame confirm = input(Fore.RED + "\nAre you sure you want to delete the savegame? (yes/no): " + Style.RESET_ALL) if confirm.lower() == "yes": try: os.remove("savegame.pkl") print_slow(Fore.GREEN + "\nSavegame Deleted" + Style.RESET_ALL) time.sleep(1) clear_terminal() game_settings() except FileNotFoundError: print_slow(Fore.RED + "\nSavegame not found" + Style.RESET_ALL) time.sleep(1) clear_terminal() game_settings() elif choice.lower() == "back" or choice.lower() == "back to main menu": # Return to Main Menu print_slow(Fore.GREEN + "\nReturning to Main Menu..." + Style.RESET_ALL) time.sleep(1) clear_terminal() else: print_slow(Fore.RED + "\nInvalid choice, please try again." 
+ Style.RESET_ALL) time.sleep(1) clear_terminal() game_settings() # Function to add an item to the inventory def add_to_inventory(item): inventory.append(item) def remove_from_inventory(item): if item in inventory: inventory.remove(item) def add_evidence(evidence_item): evidence.append(evidence_item) def has_evidence(evidence_item): return evidence_item in evidence # Prints the games title def main(): clear_terminal() colorama.init() print_slow(Fore.GREEN + "██████╗░██╗░░░░░░█████╗░░█████╗░██╗░░██╗██╗░░██╗░█████╗░████████╗" + Style.RESET_ALL) print_slow(Fore.GREEN + "██╔══██╗██║░░░░░██╔══██╗██╔══██╗██║░██╔╝██║░░██║██╔══██╗╚══██╔══╝" + Style.RESET_ALL) print_slow(Fore.GREEN + "██████╦╝██║░░░░░███████║██║░░╚═╝█████═╝░███████║███████║░░░██║░░░" + Style.RESET_ALL) print_slow(Fore.GREEN + "██╔══██╗██║░░░░░██╔══██║██║░░██╗██╔═██╗░██╔══██║██╔══██║░░░██║░░░" + Style.RESET_ALL) print_slow(Fore.GREEN + "██████╦╝███████╗██║░░██║╚█████╔╝██║░╚██╗██║░░██║██║░░██║░░░██║░░░" + Style.RESET_ALL) print_slow(Fore.GREEN + "╚═════╝░╚══════╝╚═╝░░╚═╝░╚════╝░╚═╝░░╚═╝╚═╝░░╚═╝╚═╝░░╚═╝░░░╚═╝░░░" + Style.RESET_ALL) # Pause for 2 seconds before clearing the console time.sleep(5) # Clear the console clear_terminal() # Main menu loop while True: print_slow(Fore.GREEN + "███╗░░░███╗░█████╗░██╗███╗░░██╗  ███╗░░░███╗███████╗███╗░░██╗██╗░░░██╗") print_slow(Fore.GREEN + "████╗░████║██╔══██╗██║████╗░██║  ████╗░████║██╔════╝████╗░██║██║░░░██║") print_slow(Fore.GREEN + "██╔████╔██║███████║██║██╔██╗██║  ██╔████╔██║█████╗░░██╔██╗██║██║░░░██║") print_slow(Fore.GREEN + "██║╚██╔╝██║██╔══██║██║██║╚████║  ██║╚██╔╝██║██╔══╝░░██║╚████║██║░░░██║") print_slow(Fore.GREEN + "██║░╚═╝░██║██║░░██║██║██║░╚███║  ██║░╚═╝░██║███████╗██║░╚███║╚██████╔╝") print_slow( Fore.GREEN + "╚═╝░░░░░╚═╝╚═╝░░╚═╝╚═╝╚═╝░░╚══╝  ╚═╝░░░░░╚═╝╚══════╝╚═╝░░╚══╝░╚═════╝░" + Style.RESET_ALL) print_slow("") print_slow("") print_slow("") print_slow(Fore.GREEN + " --------------------------------------------" + Style.RESET_ALL) print_slow(Fore.GREEN + "| [Start] Start the game |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| [Options] Change the settings |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| [Exit] Exit the game |" + Style.RESET_ALL) print_slow(Fore.GREEN + " --------------------------------------------" + Style.RESET_ALL) choice = input(Fore.GREEN + "\n> " + Style.RESET_ALL) # Start the game if choice.lower() == "start": load_game() start_game() # Open game settings elif choice.lower() == "options": clear_terminal() game_settings() # Exit the game elif choice.lower() == "exit": print_slow(Fore.GREEN + "\nExiting..." + Style.RESET_ALL) pygame.mixer.music.stop() sys.exit() else: print_slow(Fore.RED + "\nInvalid choice, please try again." 
+ Style.RESET_ALL) time.sleep(2) clear_terminal() # Function to get the user's balance def get_balance(): return balance # Function to add money to the user's balance def add_money(amount): global balance balance += amount # Function to subtract money from the user's balance def subtract_money(amount): global balance balance -= amount def add_level(level): global player_level player_level += level # Function to print the user's balance def print_balance(): print_slow(f"Your current balance is: £{get_balance()}") # Function to read files and marks files as evidence def read_file(file_content, file_name): global has_read_file, evidence global balance # Print the file content print_slow(Fore.LIGHTBLUE_EX + f"\n{file_name}:\n\n{file_content}" + Style.RESET_ALL) print_slow("") # Check if the file is one of the specific files that increases evidence count if file_name.lower() in ["employee_performance_review.txt"]: evidence_item = 4 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) fourth_call() if file_name.lower() in ["meeting_minutes.txt"]: evidence_item = 5 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) fifth_call() # Add more file names here as needed # Add money to balance based on the file name if file_name.lower() == "employee_performance_review.txt": balance += 30 elif file_name.lower() == "meeting_minutes.txt": balance += 50 # List of available upgrades upgrades = [ {"name": "EnigmaLink", "description": "Application required to connect to Enigma Corps network.", "price": 100}, {"name": "CodeShatter", "description": "A powerful password breaker that can crack even the strongest passwords.", "price": 250}, {"name": "EyeSpy", "description": "A privacy breaker to gain access to the smallest of cameras.", "price": 500}, {"name": "Rift", "description": "Break the barrier between the Server and Network.", "price": 800} ] # Function to display the shop def shop(): clear_terminal() print_slow(Fore.YELLOW + r''' ██╗░░██╗░█████╗░░█████╗░██╗░░██╗███████╗██████╗░  ███╗░░░███╗░█████╗░██████╗░██╗░░██╗███████╗████████╗ ██║░░██║██╔══██╗██╔══██╗██║░██╔╝██╔════╝██╔══██╗  ████╗░████║██╔══██╗██╔══██╗██║░██╔╝██╔════╝╚══██╔══╝ ███████║███████║██║░░╚═╝█████═╝░█████╗░░██████╔╝  ██╔████╔██║███████║██████╔╝█████═╝░█████╗░░░░░██║░░░ ██╔══██║██╔══██║██║░░██╗██╔═██╗░██╔══╝░░██╔══██╗  ██║╚██╔╝██║██╔══██║██╔══██╗██╔═██╗░██╔══╝░░░░░██║░░░ ██║░░██║██║░░██║╚█████╔╝██║░╚██╗███████╗██║░░██║  ██║░╚═╝░██║██║░░██║██║░░██║██║░╚██╗███████╗░░░██║░░░ ╚═╝░░╚═╝╚═╝░░╚═╝░╚════╝░╚═╝░░╚═╝╚══════╝╚═╝░░╚═╝  ╚═╝░░░░░╚═╝╚═╝░░╚═╝╚═╝░░╚═╝╚═╝░░╚═╝╚══════╝░░░╚═╝░░░''' + Style.RESET_ALL) print_slow(Fore.YELLOW + "\nWelcome to the Hacker's Market!" 
+ Style.RESET_ALL) print_slow("") print_slow(Fore.YELLOW + "Here you can buy upgrades to improve your hacking abilities.\n" + Style.RESET_ALL) while True: # Display the list of available upgrades for i, upgrade in enumerate(upgrades): print_slow( Fore.YELLOW + f"\n{upgrade['name']} - {upgrade['description']} - £{upgrade['price']}" + Style.RESET_ALL) # Get the user's choice command = input(Fore.YELLOW + "\n> " + Style.RESET_ALL) # Buy the chosen upgrade if command.lower() == 'exit': print_slow(Fore.YELLOW + "\nExiting Hacker's Market" + Style.RESET_ALL) time.sleep(1) clear_terminal() start_game() elif command.lower() == 'help': shop_help() elif command.lower().startswith('buy '): upgrade_name = command[4:] # [4:] removes first 4 characters if has_item('EnigmaLink'): if upgrade_name.lower() == 'enigmalink': print_slow("") print_slow(Fore.RED + "Sold Out" + Style.RESET_ALL) time.sleep(1) clear_terminal() shop() else: for upgrade in upgrades: if upgrade_name.lower() == upgrade['name'].lower(): if get_balance() >= upgrade['price']: print_slow("") print_slow( Fore.GREEN + f"You have successfully purchased {upgrade['name']} for ${upgrade['price']}!" + Style.RESET_ALL) subtract_money(upgrade['price']) print_slow("") print_balance() add_to_inventory(upgrade['name']) time.sleep(2) clear_terminal() # Check if the purchased upgrade is CodeShatter if upgrade_name.lower() == 'codeshatter': print_slow("") print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) code_shatter_call() shop() else: clear_terminal() shop() else: print_slow( Fore.RED + "You don't have enough money to buy this upgrade." + Style.RESET_ALL) time.sleep(1) clear_terminal() shop() else: print_slow(Fore.RED + "Invalid choice, please try again." + Style.RESET_ALL) time.sleep(1) clear_terminal() shop() else: for upgrade in upgrades: if upgrade_name.lower() == upgrade['name'].lower(): if get_balance() >= upgrade['price']: print_slow("") print_slow( Fore.GREEN + f"You have successfully purchased {upgrade['name']} for ${upgrade['price']}!" + Style.RESET_ALL) subtract_money(upgrade['price']) print_slow("") print_balance() add_to_inventory(upgrade['name']) time.sleep(2) clear_terminal() shop() else: print_slow( Fore.RED + "You don't have enough money to buy this upgrade." + Style.RESET_ALL) shop() else: print_slow(Fore.RED + "Invalid choice, please try again." + Style.RESET_ALL) time.sleep(1) clear_terminal() shop() # Function to start the game def start_game(): global has_intro_call, has_started_game, seen_markus if has_intro_call: clear_terminal() pass else: print_slow("\nStarting game...") time.sleep(1) print_slow("\nLoading assets...") time.sleep(1) clear_terminal() print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) intro_call() has_intro_call = True has_started_game = True print_slow(Fore.MAGENTA + "\nHint: Type 'help' to get a list of available commands." + Style.RESET_ALL) pass if seen_markus: print_slow(Fore.GREEN + "Incoming Call..." 
+ Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) markus_seen_call() else: pass # Game command loop command = input(Fore.GREEN + "> " + Style.RESET_ALL) # Connect to the network if command.lower() == "connect": connect() # Access the mail system elif command.lower() == "mail": mail() # Display help message elif command.lower() == "help": help_user() # Check balance elif command.lower() == "balance": print_balance() # Enter shop elif command.lower() == "shop": shop() # Clear terminal elif command.lower() == "clear": clear_terminal() # Return to the main menu elif command.lower() == "exit": print_slow("Returning to Main Menu...") time.sleep(1) main() else: print_slow("Invalid command, please try again.") time.sleep(1) clear_terminal() start_game() # Save the game state save_game() # Function to check if an item is in the inventory def has_item(item): return item in inventory def scan(): print_slow("") print_slow(Fore.YELLOW + "Scanning network..." + Style.RESET_ALL) time.sleep(2) print_slow("") print_slow(Fore.YELLOW + "\nAvailable Systems:" + Style.RESET_ALL) print_slow("") for system in all_systems: if system['level'] == player_level: print_slow("") print_slow(f"{system['name']} ({system['type']})") print_slow("") def getpass_star(prompt="Password: "): print(prompt, end='', flush=True) password = [] while True: char = msvcrt.getch().decode('utf-8') if char == '\r' or char == '\n': break elif char == '\b': # Backspace if password: password.pop() print('\b \b', end='', flush=True) else: password.append(char) print('*', end='', flush=True) print() # Move to the next line return ''.join(password) def hack(system_name): global seen_markus # Find the system in the all_systems list system = next((s for s in all_systems if s['name'].lower() == system_name.lower()), None) if system: if system['level'] == player_level: # Check for CodeShatter before prompting for password if system['name'] == 'Markus' and has_item("CodeShatter"): clear_terminal() code_shatter_minigame() print_slow("Password Cracked: 735@&!//") input("Press [Enter] to continue") clear_terminal() markus_system_command_loop(markus_system) add_level(player_level) remove_from_inventory(item="CodeShatter") seen_markus = True elif system['name'] == 'Lobby Camera' and has_item("EyeSpy"): port_scanning() add_level(player_level) camera_first() else: # Prompt the user for the password print_slow("") password = getpass_star("Enter password: ") print_slow("") if password == system['password']: print_slow("") print_slow(Fore.GREEN + "Access granted!" + Style.RESET_ALL) if system['name'] == 'Amy': amy_system_command_loop(amy_system) elif system['name'] == 'Billy': billy_system_command_loop(billy_system) elif system['name'] == 'Markus': markus_system_command_loop(markus_system) add_level(player_level) seen_markus = True elif system['name'] == 'Lobby Camera': camera_first() elif system['name'] == 'Kyle': # Implement Kyle System else: # Add more conditions for other systems pass else: print_slow("") print_slow(Fore.RED + "Access denied! Incorrect password." + Style.RESET_ALL) else: print_slow("") print_slow(Fore.RED + "System not found! Please try again." + Style.RESET_ALL) else: print_slow("") print_slow(Fore.RED + "System not found! Please try again." 
+ Style.RESET_ALL) def list_emails(emails): print_slow(Fore.LIGHTBLUE_EX + "\nEmails:" + Style.RESET_ALL) for i, email in enumerate(emails): print_slow(Fore.LIGHTBLUE_EX + f"\n{email['subject']} - From: {email['sender']}" + Style.RESET_ALL) def read_email(emails, subject): global has_read_email, evidence global balance email_found = False for email in emails: if email['subject'].lower() == subject.lower(): email_found = True print_slow( Fore.LIGHTBLUE_EX + f"\nFrom: {email['sender']}\nSubject: {email['subject']}\n\n{email['body']}" + Style.RESET_ALL) # Check if the email is one of the specific emails that increases evidence count if email['subject'].lower() in ["project update"]: evidence_item = 3 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) third_call() if email['subject'].lower() in ["professional development"]: evidence_item = 2 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) second_call() if email['subject'].lower() == "can't stop thinking about you" and email['sender'].lower() == 'amy': evidence_item = 1 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) first_call() if email['subject'].lower() == "upcoming software update" and email['sender'].lower() == 'markus': evidence_item = 6 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) sixth_call() # Add money to balance based on the email subject if email['subject'].lower() == "professional development": balance += 30 elif email['subject'].lower() == "project update": balance += 50 elif email['subject'].lower() == "can't stop thinking about you": balance += 20 elif email['subject'].lower() == "upcoming software update": balance += 50 if not email_found: print_slow(Fore.RED + "\nNo email found with that subject, please try again." + Style.RESET_ALL) def connect(): if has_item("EnigmaLink"): print_slow("") print_slow(Fore.GREEN + "Connecting to Enigma Corps network using EnigmaLink..." 
+ Style.RESET_ALL) time.sleep(0.5) print_slow("") print_slow(Fore.GREEN + "Establishing connection...") time.sleep(1) print_slow("") print_slow(Fore.GREEN + "Linking EnigmaLink to remote server...") time.sleep(2) print_slow("") print_slow(Fore.GREEN + "Decrypting server security protocols...") time.sleep(3) print_slow("") print_slow(Fore.GREEN + "Bypassing firewall...") time.sleep(2) print_slow("") print_slow(Fore.GREEN + "Connection established!") time.sleep(2) print_slow("") print_slow(Fore.GREEN + "You are now connected to Enigma Corps network.") print_slow("") # Network command loop while True: command = input(Fore.GREEN + "> " + Style.RESET_ALL) # Scan the network for systems and vulnerabilities if command.lower() == "scan": scan() # Hack into a system or vulnerability elif command.lower().startswith("hack "): target = command[5:] hack(target) # Display connect help message elif command.lower() == "help":
connect_help()
4
2023-11-06 09:52:13+00:00
24k
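`save_game` / `load_game` in this record persist a tuple of module-level globals with `pickle`, falling back to defaults when `savegame.pkl` is missing. Below is a minimal sketch of the same save/load pattern using a single state dict instead of globals; the dict keys are illustrative.

import os
import pickle

DEFAULT_STATE = {"inventory": [], "balance": 300, "player_level": 1}

def save_state(state: dict, path: str = "savegame.pkl") -> None:
    with open(path, "wb") as f:
        pickle.dump(state, f)

def load_state(path: str = "savegame.pkl") -> dict:
    if os.path.exists(path):
        with open(path, "rb") as f:
            return pickle.load(f)
    return dict(DEFAULT_STATE)  # first run: fall back to defaults

if __name__ == "__main__":
    state = load_state()
    state["balance"] += 50
    save_state(state)
    print(load_state()["balance"])

Keeping the state in one dict avoids the long `global` declarations the record needs every time a new field is added.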
ziqi-zhang/TAOISM
python/test/test_conv.py
[ { "identifier": "register_layer", "path": "python/common_net.py", "snippet": "def register_layer(layer, name):\n layer.register_forward_hook(hooking_layer(name))\n layer.register_backward_hook(hooking_layer_backward(name))\n layer_names.append(name)" }, { "identifier": "register_weight_...
import os import sys import numpy as np import torch import torch.distributed as dist import sys import pdb from pdb import set_trace as st from torch import optim, nn from python.common_net import register_layer, register_weight_layer, get_layer_weight, get_layer_input, \ get_layer_weight_grad, get_layer_output, get_layer_output_grad, get_layer_input_grad from python.enclave_interfaces import GlobalTensor from python.layers.batch_norm_2d import SecretBatchNorm2dLayer from python.layers.flatten import SecretFlattenLayer from python.layers.input import SecretInputLayer from python.layers.maxpool2d import SecretMaxpool2dLayer from python.layers.output import SecretOutputLayer from python.layers.relu import SecretReLULayer from python.sgx_net import init_communicate, warming_up_cuda, SecretNeuralNetwork, SgdOptimizer from python.layers.sgx_linear_base import SGXLinearBase from python.layers.sgx_conv_base import SGXConvBase from python.utils.basic_utils import ExecutionModeOptions from python.utils.logger_utils import Logger from python.quantize_net import NetQ from python.test_sgx_net import argparser_distributed, marshal_process, load_cifar10, seed_torch from python.utils.timer_utils import NamedTimerInstance, VerboseLevel, NamedTimer from python.utils.torch_utils import compare_expected_actual from pdb import set_trace as st
21,043
device_cuda = torch.device("cuda:0") torch.set_printoptions(precision=10) def compare_layer_member(layer: SGXLinearBase, layer_name: str, extract_func , member_name: str, save_path=None) -> None: print(member_name) layer.make_sure_cpu_is_latest(member_name) compare_expected_actual(extract_func(layer_name), layer.get_cpu(member_name), get_relative=True, verbose=True) if save_path is not None: if not os.path.exists(save_path): os.makedirs(save_path) print("Directory ", save_path, " Created ") else: print("Directory ", save_path, " already exists") torch.save(extract_func(layer_name), os.path.join(save_path, member_name + "_expected")) torch.save(layer.get_cpu(member_name), os.path.join(save_path, member_name + "_actual")) def compare_layer(layer: SGXLinearBase, layer_name: str, save_path=None) -> None: print("comparing with layer in expected NN :", layer_name)
device_cuda = torch.device("cuda:0") torch.set_printoptions(precision=10) def compare_layer_member(layer: SGXLinearBase, layer_name: str, extract_func , member_name: str, save_path=None) -> None: print(member_name) layer.make_sure_cpu_is_latest(member_name) compare_expected_actual(extract_func(layer_name), layer.get_cpu(member_name), get_relative=True, verbose=True) if save_path is not None: if not os.path.exists(save_path): os.makedirs(save_path) print("Directory ", save_path, " Created ") else: print("Directory ", save_path, " already exists") torch.save(extract_func(layer_name), os.path.join(save_path, member_name + "_expected")) torch.save(layer.get_cpu(member_name), os.path.join(save_path, member_name + "_actual")) def compare_layer(layer: SGXLinearBase, layer_name: str, save_path=None) -> None: print("comparing with layer in expected NN :", layer_name)
compare_name_function = [("input", get_layer_input), ("output", get_layer_output),
5
2023-11-01 10:37:37+00:00
24k
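The test in this record relies on `compare_expected_actual(..., get_relative=True)` to diff enclave outputs against plain PyTorch. The exact metric lives in `python/utils/torch_utils.py`; as a sketch under the assumption that it is a norm-ratio style relative error (the formula below is a common choice, not necessarily TAOISM's):

import torch

def relative_error(expected: torch.Tensor, actual: torch.Tensor) -> float:
    # ||expected - actual|| / ||expected||, guarded against a zero reference.
    denom = expected.norm().clamp_min(1e-12)
    return ((expected - actual).norm() / denom).item()

if __name__ == "__main__":
    x = torch.randn(4, 4)
    noisy = x + 1e-3 * torch.randn(4, 4)
    print(f"relative error: {relative_error(x, noisy):.2e}")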
Codra-Ingenierie-Informatique/DataLab
cdl/tests/scenarios/common.py
[ { "identifier": "_", "path": "cdl/config.py", "snippet": "CONF_VERSION = \"1.0.0\"\nAPP_NAME = \"DataLab\"\nMOD_NAME = \"cdl\"\nAPP_DESC = _(\"\"\"DataLab is a generic signal and image processing platform\"\"\")\nAPP_PATH = osp.dirname(__file__)\nDEBUG = os.environ.get(\"DEBUG\", \"\").lower() in (\"1\"...
import numpy as np import cdl.obj as dlo import cdl.param as dlp from cdl.config import _ from cdl.core.gui.main import CDLMainWindow from cdl.core.gui.panel.image import ImagePanel from cdl.core.gui.panel.signal import SignalPanel from cdl.tests.data import ( create_paracetamol_signal, create_peak2d_image, create_sincos_image, ) from cdl.tests.features.common.newobject_unit import ( iterate_image_creation, iterate_signal_creation, ) from cdl.widgets import fitdialog
19,718
panel.add_label_with_title() __compute_11_operations(panel, 2) def run_signal_computations( win: CDLMainWindow, data_size: int = 500, all_types: bool = True ) -> None: """Testing signal features""" panel = win.signalpanel win.set_current_panel("signal") if all_types: for signal in iterate_signal_creation(data_size, non_zero=True): panel.add_object(create_paracetamol_signal(data_size)) panel.add_object(signal) compute_common_operations(panel) panel.remove_all_objects() sig1 = create_paracetamol_signal(data_size) win.add_object(sig1) # Add new signal based on s0 panel.objview.set_current_object(sig1) newparam = dlo.new_signal_param( _("Random function"), stype=dlo.SignalTypes.UNIFORMRANDOM ) addparam = dlo.UniformRandomParam.create(vmin=0, vmax=sig1.y.max() * 0.2) panel.new_object(newparam, addparam=addparam, edit=False) compute_common_operations(panel) win.add_object(create_paracetamol_signal(data_size)) param = dlp.NormalizeYParam() for _name, method in param.methods: param.method = method panel.processor.compute_normalize(param) param = dlp.XYCalibrateParam.create(a=1.2, b=0.1) panel.processor.compute_calibration(param) panel.processor.compute_derivative() panel.processor.compute_integral() param = dlp.PeakDetectionParam() panel.processor.compute_peak_detection(param) panel.processor.compute_multigaussianfit() panel.objview.select_objects([-3]) sig = panel.objview.get_sel_objects()[0] i1 = data_size // 10 i2 = len(sig.y) - i1 panel.processor.compute_roi_extraction(dlp.ROIDataParam.create([[i1, i2]])) param = dlp.PolynomialFitParam() panel.processor.compute_polyfit(param) panel.processor.compute_fit(_("Gaussian fit"), fitdialog.gaussianfit) panel.processor.compute_fit(_("Lorentzian fit"), fitdialog.lorentzianfit) panel.processor.compute_fit(_("Voigt fit"), fitdialog.voigtfit) newparam = dlo.new_signal_param(_("Gaussian"), stype=dlo.SignalTypes.GAUSS) sig = dlo.create_signal_from_param( newparam, dlo.GaussLorentzVoigtParam(), edit=False ) panel.add_object(sig) param = dlp.FWHMParam() for fittype, _name in param.fittypes: param.fittype = fittype panel.processor.compute_fwhm(param) panel.processor.compute_fw1e2() # Create a new signal which X values are a subset of sig1 x = np.linspace(sig1.x.min(), sig1.x.max(), data_size // 2)[: data_size // 4] y = x * 0.0 sig2 = dlo.create_signal("X values for interpolation", x, y) panel.add_object(sig2) # Test interpolation for method_choice_tuple in dlp.InterpolationParam._methods: method = method_choice_tuple[0] for fill_value in (None, 0.0): panel.objview.set_current_object(sig1) param = dlp.InterpolationParam.create(method=method, fill_value=fill_value) panel.processor.compute_interpolation(sig2, param) # Test resampling xmin, xmax = x[0], x[-1] for mode, dx, nbpts in (("dx", 0.1, 10), ("nbpts", 0.0, 100)): panel.objview.set_current_object(sig1) param = dlp.ResamplingParam.create( xmin=xmin, xmax=xmax, mode=mode, dx=dx, nbpts=nbpts ) panel.processor.compute_resampling(param) # Test convolution panel.objview.set_current_object(sig1) panel.processor.compute_derivative() panel.processor.compute_convolution(sig1) # Test detrending panel.objview.set_current_object(sig1) for method_choice_tuple in dlp.DetrendingParam._methods: param = dlp.DetrendingParam.create(method=method_choice_tuple[0]) panel.processor.compute_detrending(param) def run_image_computations( win: CDLMainWindow, data_size: int = 150, all_types: bool = True ) -> None: """Testing signal features""" win.set_current_panel("image") panel = win.imagepanel newparam = 
dlo.new_image_param(height=data_size, width=data_size) if all_types:
# -*- coding: utf-8 -*- # # Licensed under the terms of the BSD 3-Clause # (see cdl/LICENSE for details) """ Scenarios common functions """ # pylint: disable=invalid-name # Allows short reference names like x, y, ... # guitest: skip from __future__ import annotations def __compute_11_operations(panel: SignalPanel | ImagePanel, number: int) -> None: """Test compute_11 type operations on a signal or image Requires that one signal or image has been added at index.""" assert len(panel) >= number - 1 panel.objview.select_objects((number,)) panel.processor.compute_gaussian_filter(dlp.GaussianParam()) panel.processor.compute_moving_average(dlp.MovingAverageParam()) panel.processor.compute_moving_median(dlp.MovingMedianParam()) panel.processor.compute_wiener() panel.processor.compute_fft() panel.processor.compute_ifft() panel.processor.compute_abs() panel.remove_object() panel.processor.compute_re() panel.remove_object() panel.processor.compute_im() panel.remove_object() panel.processor.compute_astype(dlp.DataTypeIParam.create(dtype="float64")) panel.processor.compute_log10() panel.processor.compute_swap_axes() panel.processor.compute_swap_axes() def compute_common_operations(panel: SignalPanel | ImagePanel) -> None: """Test operations common to signal/image Requires that two (and only two) signals/images are created/added to panel First signal/image is supposed to be always the same (reference) Second signal/image is the tested object """ assert len(panel) == 2 panel.objview.select_objects((2,)) panel.processor.compute_difference(panel[1]) # difference with obj #1 panel.remove_object() panel.objview.select_objects((2,)) panel.processor.compute_quadratic_difference() # quadratic difference with itself panel.delete_metadata() panel.objview.select_objects((3,)) panel.remove_object() panel.objview.select_objects((1, 2)) panel.processor.compute_sum() panel.objview.select_objects((1, 2)) panel.processor.compute_sum() panel.objview.select_objects((1, 2)) panel.processor.compute_product() obj = panel.objmodel.get_groups()[0][-1] param = dlp.ThresholdParam() param.value = (obj.data.max() - obj.data.min()) * 0.2 + obj.data.min() panel.processor.compute_threshold(param) param = dlp.ClipParam() # Clipping before division... 
param.value = (obj.data.max() - obj.data.min()) * 0.8 + obj.data.min() panel.processor.compute_clip(param) panel.objview.select_objects((3, 7)) panel.processor.compute_division() panel.objview.select_objects((1, 2, 3)) panel.processor.compute_average() panel.add_label_with_title() __compute_11_operations(panel, 2) def run_signal_computations( win: CDLMainWindow, data_size: int = 500, all_types: bool = True ) -> None: """Testing signal features""" panel = win.signalpanel win.set_current_panel("signal") if all_types: for signal in iterate_signal_creation(data_size, non_zero=True): panel.add_object(create_paracetamol_signal(data_size)) panel.add_object(signal) compute_common_operations(panel) panel.remove_all_objects() sig1 = create_paracetamol_signal(data_size) win.add_object(sig1) # Add new signal based on s0 panel.objview.set_current_object(sig1) newparam = dlo.new_signal_param( _("Random function"), stype=dlo.SignalTypes.UNIFORMRANDOM ) addparam = dlo.UniformRandomParam.create(vmin=0, vmax=sig1.y.max() * 0.2) panel.new_object(newparam, addparam=addparam, edit=False) compute_common_operations(panel) win.add_object(create_paracetamol_signal(data_size)) param = dlp.NormalizeYParam() for _name, method in param.methods: param.method = method panel.processor.compute_normalize(param) param = dlp.XYCalibrateParam.create(a=1.2, b=0.1) panel.processor.compute_calibration(param) panel.processor.compute_derivative() panel.processor.compute_integral() param = dlp.PeakDetectionParam() panel.processor.compute_peak_detection(param) panel.processor.compute_multigaussianfit() panel.objview.select_objects([-3]) sig = panel.objview.get_sel_objects()[0] i1 = data_size // 10 i2 = len(sig.y) - i1 panel.processor.compute_roi_extraction(dlp.ROIDataParam.create([[i1, i2]])) param = dlp.PolynomialFitParam() panel.processor.compute_polyfit(param) panel.processor.compute_fit(_("Gaussian fit"), fitdialog.gaussianfit) panel.processor.compute_fit(_("Lorentzian fit"), fitdialog.lorentzianfit) panel.processor.compute_fit(_("Voigt fit"), fitdialog.voigtfit) newparam = dlo.new_signal_param(_("Gaussian"), stype=dlo.SignalTypes.GAUSS) sig = dlo.create_signal_from_param( newparam, dlo.GaussLorentzVoigtParam(), edit=False ) panel.add_object(sig) param = dlp.FWHMParam() for fittype, _name in param.fittypes: param.fittype = fittype panel.processor.compute_fwhm(param) panel.processor.compute_fw1e2() # Create a new signal which X values are a subset of sig1 x = np.linspace(sig1.x.min(), sig1.x.max(), data_size // 2)[: data_size // 4] y = x * 0.0 sig2 = dlo.create_signal("X values for interpolation", x, y) panel.add_object(sig2) # Test interpolation for method_choice_tuple in dlp.InterpolationParam._methods: method = method_choice_tuple[0] for fill_value in (None, 0.0): panel.objview.set_current_object(sig1) param = dlp.InterpolationParam.create(method=method, fill_value=fill_value) panel.processor.compute_interpolation(sig2, param) # Test resampling xmin, xmax = x[0], x[-1] for mode, dx, nbpts in (("dx", 0.1, 10), ("nbpts", 0.0, 100)): panel.objview.set_current_object(sig1) param = dlp.ResamplingParam.create( xmin=xmin, xmax=xmax, mode=mode, dx=dx, nbpts=nbpts ) panel.processor.compute_resampling(param) # Test convolution panel.objview.set_current_object(sig1) panel.processor.compute_derivative() panel.processor.compute_convolution(sig1) # Test detrending panel.objview.set_current_object(sig1) for method_choice_tuple in dlp.DetrendingParam._methods: param = dlp.DetrendingParam.create(method=method_choice_tuple[0]) 
panel.processor.compute_detrending(param) def run_image_computations( win: CDLMainWindow, data_size: int = 150, all_types: bool = True ) -> None: """Testing signal features""" win.set_current_panel("image") panel = win.imagepanel newparam = dlo.new_image_param(height=data_size, width=data_size) if all_types:
for image in iterate_image_creation(data_size, non_zero=True):
7
2023-11-09 16:56:03+00:00
24k
ingra14m/Tensor4D-DNeRF
exp_runner.py
[ { "identifier": "Dataset", "path": "models/dataset.py", "snippet": "class Dataset:\n def __init__(self, conf):\n super(Dataset, self).__init__()\n print('Load data: Begin')\n self.device = torch.device('cuda')\n self.conf = conf\n\n self.data_dir = conf.get_string('...
import os
import time
import logging
import argparse
import numpy as np
import cv2 as cv
import torch
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from shutil import copyfile
from tqdm import tqdm
from pyhocon import ConfigFactory
from models.dataset import Dataset, BlenderDataset
from models.fields import RenderingNetwork, FieldNetwork, SingleVarianceNetwork
from models.tensor4d import Tensor4D
from models.renderer import NeuSRenderer
from models.mask import Mask3D
from metrics import *
15,678
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'


class Runner:
    def __init__(self, conf_path, mode='train', case='CASE_NAME', is_continue=False):
        self.device = torch.device('cuda')

        # Configuration
        self.conf_path = conf_path
        f = open(self.conf_path)
        conf_text = f.read()
        conf_text = conf_text.replace('CASE_NAME', case)
        f.close()

        self.conf = ConfigFactory.parse_string(conf_text)
        self.conf['dataset.data_dir'] = self.conf['dataset.data_dir'].replace('CASE_NAME', case)
        self.base_exp_dir = self.conf['general.base_exp_dir']
        os.makedirs(self.base_exp_dir, exist_ok=True)
        self.is_blender = self.conf['dataset'].get_bool('is_blender', default=False)
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'


class Runner:
    def __init__(self, conf_path, mode='train', case='CASE_NAME', is_continue=False):
        self.device = torch.device('cuda')

        # Configuration
        self.conf_path = conf_path
        f = open(self.conf_path)
        conf_text = f.read()
        conf_text = conf_text.replace('CASE_NAME', case)
        f.close()

        self.conf = ConfigFactory.parse_string(conf_text)
        self.conf['dataset.data_dir'] = self.conf['dataset.data_dir'].replace('CASE_NAME', case)
        self.base_exp_dir = self.conf['general.base_exp_dir']
        os.makedirs(self.base_exp_dir, exist_ok=True)
        self.is_blender = self.conf['dataset'].get_bool('is_blender', default=False)
self.dataset = BlenderDataset(self.conf['dataset']) if self.is_blender else Dataset(self.conf['dataset'])
0
2023-11-07 10:16:33+00:00
24k
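The Tensor4D runner above templates its HOCON config by substituting CASE_NAME into the raw text before parsing. A minimal self-contained sketch of that pattern with pyhocon (the config body here is invented for illustration; only the parse/replace calls match the record):

from pyhocon import ConfigFactory

conf_text = '''
general { base_exp_dir = "./exp/CASE_NAME" }
dataset { data_dir = "./data/CASE_NAME", is_blender = false }
'''

case = "lego"
conf = ConfigFactory.parse_string(conf_text.replace("CASE_NAME", case))
print(conf["general.base_exp_dir"])            # ./exp/lego
print(conf["dataset"].get_bool("is_blender"))  # False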
Giftify-Bot/Giftify-Bot
bot.py
[ { "identifier": "GuildConfig", "path": "models/giveaway_settings.py", "snippet": "class GuildConfig:\n \"\"\"Represents the configuration settings for a guild.\n\n Parameters\n ----------\n guild: discord.Guild\n The guild associated with the configuration.\n logging: Optional[disc...
import asyncio
import datetime
import logging
import os
import pathlib
import sys
import traceback
import aiohttp
import asyncpg
import discord
import dotenv
import jishaku
import sentry_sdk
import uvloop
from logging.handlers import RotatingFileHandler
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
from amari import AmariClient
from discord.ext import commands
from discord.utils import MISSING
from discord.utils import _ColourFormatter as ColourFormatter
from expiringdict import ExpiringDict
from sentry_sdk.integrations.logging import LoggingIntegration
from models.giveaway_settings import GuildConfig
from models.giveaways import Giveaway
from models.raffles import Raffle
from utils.constants import ERROR_EMOJI, SUCCESS_EMOJI, WARN_EMOJI
from utils.db import db_init
from utils.tree import CommandTree
from utils.view import ConfirmationView
from cogs.timer_manager import TimerManager
from models.donation_settings import GuildDonationConfig
14,886
async def fetch_level(self, member: discord.Member, /) -> int: """Fetches user level from Amari Bot API. Parameters ----------- member: discord.Member The member whose level is to be fetched. Returns --------- int The retrieved level. """ try: user = await self.amari_client.fetch_user(member.guild.id, member.id) except Exception: return 0 else: return user.level or 0 async def fetch_weekly_experience(self, member: discord.Member, /) -> int: """Fetches user's weekly experience from Amari Bot API. Parameters ----------- member: discord.Member The member whose weekly experience is to be fetched. Returns --------- int The retrieved weekly experience. """ try: user = await self.amari_client.fetch_user(member.guild.id, member.id) except Exception: return 0 else: return user.weeklyexp or 0 async def prompt( self, message: str, *, interaction: discord.Interaction[Giftify], success_message: str, cancel_message: str, timeout: float = 60.0, ) -> Optional[bool]: """An interactive reaction confirmation dialog. Parameters ----------- message: str The message to show along with the prompt. timeout: float How long to wait before returning. interaction: Interaction The interaction object to handle the confirmation dialog. success_message: str The message to show when the user clicks Confirm. cancel_message: str The message to show when the user clicks Cancel. Returns -------- Optional[bool] ``True`` if explicit confirm, ``False`` if explicit deny, ``None`` if deny due to timeout """ view = ConfirmationView( timeout=timeout, interaction=interaction, success_message=success_message, cancel_message=cancel_message, ) view.message = await self.send(interaction, message, view=view, reason="warn") await view.wait() return view.value class Giftify(GiftifyHelper, commands.AutoShardedBot): user: discord.ClientUser colour: int = 0xCB3045 __version_info__ = "1.1.4" def __init__( self, *, log_handler: LogHandler, pool: asyncpg.Pool, session: aiohttp.ClientSession, amari_client: AmariClient, ) -> None: self._log_handler = log_handler self._pool = pool self._session = session self._amari_client = amari_client intents = discord.Intents(messages=True, emojis=True, guilds=True) allowed_mentions = discord.AllowedMentions(everyone=False, roles=False, users=True, replied_user=False) member_cache_flags = discord.MemberCacheFlags.from_intents(intents=intents) sentry_sdk.init( dsn=os.environ["SENTRY_DSN"], integrations=[ LoggingIntegration( level=logging.INFO, event_level=logging.ERROR, ) ], traces_sample_rate=1.0, ) super().__init__( command_prefix=commands.when_mentioned,
from __future__ import annotations if TYPE_CHECKING: dotenv.load_dotenv() try: except ImportError: # Windows pass else: uvloop.install() jishaku.Flags.HIDE = True jishaku.Flags.RETAIN = True jishaku.Flags.NO_UNDERSCORE = True jishaku.Flags.NO_DM_TRACEBACK = True OWNER_IDS = (747403406154399765,) EXTENSIONS: Tuple[str, ...] = ( "meta", "settings", "timers", "giveaways", "donations", "raffles", "logger", "webserver", ) class RemoveNoise(logging.Filter): def __init__(self) -> None: super().__init__(name="discord.state") def filter(self, record) -> bool: if record.levelname == "WARNING" and "referencing an unknown" in record.msg: return False return True class LogHandler: def __init__(self, stream: bool = True) -> None: self.log: logging.Logger = logging.getLogger() self.max_bytes: int = 32 * 1024 * 1024 self.logging_path = pathlib.Path("./logs/") self.logging_path.mkdir(exist_ok=True) self.stream = stream async def __aenter__(self) -> "LogHandler": return self.__enter__() def __enter__(self: "LogHandler") -> "LogHandler": logging.getLogger("discord").setLevel(logging.INFO) logging.getLogger("discord.http").setLevel(logging.INFO) logging.getLogger("discord.state").addFilter(RemoveNoise()) self.log.setLevel(logging.INFO) handler = RotatingFileHandler( filename=self.logging_path / "Giftify.log", encoding="utf-8", mode="w", maxBytes=self.max_bytes, backupCount=5, ) dt_fmt = "%Y-%m-%d %H:%M:%S" fmt = logging.Formatter("[{asctime}] [{levelname:<7}] {name}: {message}", dt_fmt, style="{") handler.setFormatter(fmt) self.log.addHandler(handler) if self.stream: stream_handler = logging.StreamHandler() stream_handler.setFormatter(ColourFormatter()) self.log.addHandler(stream_handler) return self async def __aexit__(self, *args: Any) -> None: return self.__exit__(*args) def __exit__(self, *args: Any) -> None: handlers = self.log.handlers[:] for handler in handlers: handler.close() self.log.removeHandler(handler) class GiftifyHelper: configs: List[GuildConfig] = [] donation_configs: List[GuildDonationConfig] = [] cached_giveaways: List["Giveaway"] = [] webhook_cache: Dict[discord.TextChannel, discord.Webhook] = {} raffles_cache: Dict[discord.Guild, List[Raffle]] = ExpiringDict(max_len=100, max_age_seconds=300) pool: asyncpg.Pool user: discord.ClientUser amari_client: AmariClient """A helper class for Giftify's operations. This class provides methods to send interaction messages with embeds, fetch webhooks for a channel, and retrieve or fetch guild configuration. """ async def send( self, interaction: discord.Interaction, message: str, reason: str = "success", ephemeral: bool = True, view: discord.ui.View = MISSING, ) -> None: """Sends an interaction message with embed. Parameters ----------- interaction: discord.Interaction The interaction to respond to. message: str The response message to send. reason: str The reason to send the message, can be "warn", "error" or "success". ephemeral: bool If the response should be sent ephemerally. 
""" emoji = WARN_EMOJI if reason == "warn" else ERROR_EMOJI if reason == "error" else SUCCESS_EMOJI colour = ( discord.Colour.orange() if reason == "warn" else discord.Colour.red() if reason == "error" else discord.Colour.green() ) embed = discord.Embed(description=f"> {emoji} {message}", colour=colour) if interaction.response.is_done(): await interaction.followup.send(embed=embed, view=view, ephemeral=ephemeral) else: await interaction.response.send_message(embed=embed, view=view, ephemeral=ephemeral) async def _get_webhook(self, channel: discord.TextChannel, force_create: bool = False) -> discord.Webhook: if not force_create and (webhook := self.webhook_cache.get(channel)): return webhook webhook_list = await channel.webhooks() if webhook_list: for hook in webhook_list: if hook.token: if hook.user and hook.user.id == self.user.id: self.webhook_cache[channel] = hook return hook # If no suitable webhook is found, create a new one hook = await channel.create_webhook(name="Giftify Logging", avatar=await channel.guild.me.display_avatar.read()) self.webhook_cache[channel] = hook return hook async def send_to_webhook(self, channel: discord.TextChannel, embed: discord.Embed): """Sends an embed to a webhook associated with the provided channel. Parameters ----------- channel: discord.TextChannel The channel to send message to. """ try: webhook = await self._get_webhook(channel) await webhook.send(embed=embed, username="Giftify Logging", avatar_url=self.user.display_avatar) except discord.NotFound: new_webhook = await self._get_webhook(channel, force_create=True) await new_webhook.send(embed=embed, username="Giftify Logging", avatar_url=self.user.display_avatar) except discord.HTTPException: return async def fetch_config(self, guild: discord.Guild) -> GuildConfig: """Looks up a guild config in cache or fetches if not found. Parameters ----------- guild: discord.Guild The guild to look for. Returns --------- GuildConfig The retrieved guild config object. """ config = discord.utils.get(self.configs, guild=guild) if not config: config = await GuildConfig.fetch(guild, self.pool) self.configs.append(config) return config def get_donation_config(self, guild: discord.Guild, category: str) -> Optional[GuildDonationConfig]: """Finds the donation config of a guild for some category. Parameters ----------- guild: Guild The guild to which the category belongs. category: str The name of the category. Returns -------- Optional[GuildDonationConfig] The fetched donation config. """ for config in self.donation_configs: if config.guild == guild and config.category == category: return config def get_guild_donation_categories(self, guild: discord.Guild) -> List[str]: """Finds the donation categories of a guild. Parameters ----------- guild: Guild The guild for which categories will be fetched. Returns -------- List[str] The of names of donation categories. """ return [config.category for config in self.donation_configs if config.guild == guild] async def fetch_raffle(self, guild: discord.Guild, name: str) -> Optional[Raffle]: """Finds a raffle in some guild. Parameters ----------- guild: Guild The guild to which the raffle belongs. name: str The name of the raffle. Returns -------- Optional[Raffle] The fetched raffle. 
""" record = await self.pool.fetchrow("SELECT * FROM raffles WHERE guild = $1 AND name = $2", guild.id, name) if record is not None: return await Raffle.from_record(self, record=record) # type: ignore async def fetch_raffles(self, guild: discord.Guild, use_cache: bool = True) -> List[Raffle]: """Fetch all the raffles in some guild Parameters ----------- guild: Guild The guild for which raffles will be fetched. use_cache: bool Indicates wheter the bot should fetch the raffles from database or use internal cache. Returns -------- List[Raffle] The of list of fetched raffles. """ if guild in self.raffles_cache and use_cache: return self.raffles_cache[guild] records = await self.pool.fetch("SELECT * FROM raffles WHERE guild = $1", guild.id) raffles = [await Raffle.from_record(self, record=record) for record in records] # type: ignore self.raffles_cache[guild] = raffles return raffles async def fetch_giveaway(self, *, guild_id: int, channel_id: int, message_id: int) -> Optional[Giveaway]: """Looks up a for a giveaway object in database. Parameters ----------- message_id: int The ID of the giveaway message. Returns -------- Optional[Giveaway] The retrieved giveaway object. """ giveaway = discord.utils.get( self.cached_giveaways, guild_id=guild_id, channel_id=channel_id, message_id=message_id, ) if giveaway is not None: return giveaway record = await self.pool.fetchrow( "SELECT * FROM giveaways WHERE guild = $1 AND channel = $2 AND message = $3", guild_id, channel_id, message_id, ) if record is not None: giveaway = Giveaway(bot=self, record=record) # type: ignore if giveaway.messages: self.cached_giveaways.append(giveaway) return giveaway async def running_giveaways(self, *, guild_id: Optional[int] = None, sort_by_ends: bool = True) -> List[Giveaway]: """Looks up a list of active giveaways in the database. Parameters ----------- guild_id: Optional[int] The ID of the guild. If provided, fetches giveaways only for that guild. sort_by_ends: bool If True, the results will be sorted by the 'ends' column in ascending order. Returns -------- List[Giveaway] The list of fetched active giveaways. """ query = "SELECT * FROM giveaways WHERE ended = FALSE" if guild_id is not None: query += " AND guild = $1" if sort_by_ends: query += " ORDER BY ends ASC" if guild_id is not None: records = await self.pool.fetch(query, guild_id) else: records = await self.pool.fetch(query) return [Giveaway(bot=self, record=record) for record in records] # type: ignore async def fetch_level(self, member: discord.Member, /) -> int: """Fetches user level from Amari Bot API. Parameters ----------- member: discord.Member The member whose level is to be fetched. Returns --------- int The retrieved level. """ try: user = await self.amari_client.fetch_user(member.guild.id, member.id) except Exception: return 0 else: return user.level or 0 async def fetch_weekly_experience(self, member: discord.Member, /) -> int: """Fetches user's weekly experience from Amari Bot API. Parameters ----------- member: discord.Member The member whose weekly experience is to be fetched. Returns --------- int The retrieved weekly experience. """ try: user = await self.amari_client.fetch_user(member.guild.id, member.id) except Exception: return 0 else: return user.weeklyexp or 0 async def prompt( self, message: str, *, interaction: discord.Interaction[Giftify], success_message: str, cancel_message: str, timeout: float = 60.0, ) -> Optional[bool]: """An interactive reaction confirmation dialog. 
Parameters ----------- message: str The message to show along with the prompt. timeout: float How long to wait before returning. interaction: Interaction The interaction object to handle the confirmation dialog. success_message: str The message to show when the user clicks Confirm. cancel_message: str The message to show when the user clicks Cancel. Returns -------- Optional[bool] ``True`` if explicit confirm, ``False`` if explicit deny, ``None`` if deny due to timeout """ view = ConfirmationView( timeout=timeout, interaction=interaction, success_message=success_message, cancel_message=cancel_message, ) view.message = await self.send(interaction, message, view=view, reason="warn") await view.wait() return view.value class Giftify(GiftifyHelper, commands.AutoShardedBot): user: discord.ClientUser colour: int = 0xCB3045 __version_info__ = "1.1.4" def __init__( self, *, log_handler: LogHandler, pool: asyncpg.Pool, session: aiohttp.ClientSession, amari_client: AmariClient, ) -> None: self._log_handler = log_handler self._pool = pool self._session = session self._amari_client = amari_client intents = discord.Intents(messages=True, emojis=True, guilds=True) allowed_mentions = discord.AllowedMentions(everyone=False, roles=False, users=True, replied_user=False) member_cache_flags = discord.MemberCacheFlags.from_intents(intents=intents) sentry_sdk.init( dsn=os.environ["SENTRY_DSN"], integrations=[ LoggingIntegration( level=logging.INFO, event_level=logging.ERROR, ) ], traces_sample_rate=1.0, ) super().__init__( command_prefix=commands.when_mentioned,
tree_cls=CommandTree,
7
2023-11-09 15:00:15+00:00
24k
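The Giftify bot above keeps its per-guild raffle cache in an ExpiringDict(max_len=100, max_age_seconds=300), so stale entries fall out automatically. A small usage sketch of that cache type (guild names are placeholders):

from expiringdict import ExpiringDict

# Entries expire after max_age_seconds; once max_len is reached,
# inserting a new key evicts the oldest one.
cache = ExpiringDict(max_len=2, max_age_seconds=300)
cache["guild_a"] = ["raffle_1"]
cache["guild_b"] = ["raffle_2"]
cache["guild_c"] = ["raffle_3"]  # size limit hit: "guild_a" is evicted
print("guild_a" in cache)        # False
print("guild_c" in cache)        # True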
Kushalhk/AutoFilter
plugins/p_ttishow.py
[ { "identifier": "ADMINS", "path": "info.py", "snippet": "ADMINS = [int(admin) if id_pattern.search(admin) else admin for admin in environ.get('ADMINS', '').split()]" }, { "identifier": "LOG_CHANNEL", "path": "info.py", "snippet": "LOG_CHANNEL = int(environ.get('LOG_CHANNEL', ''))" }, ...
from pyrogram import Client, filters, enums
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, CallbackQuery
from pyrogram.errors.exceptions.bad_request_400 import MessageTooLong, PeerIdInvalid
from info import ADMINS, LOG_CHANNEL, SUPPORT_CHAT, MELCOW_NEW_USERS, MELCOW_VID, CHNL_LNK, GRP_LNK
from database.users_chats_db import db
from database.ia_filterdb import Media
from utils import get_size, temp, get_settings
from Script import script
from pyrogram.errors import ChatAdminRequired
import asyncio
20,001
"""-----------------------------------------https://t.me/TG_LINKS_CHANNEL--------------------------------------""" @Client.on_message(filters.new_chat_members & filters.group) async def save_group(bot, message): r_j_check = [u.id for u in message.new_chat_members] if temp.ME in r_j_check: if not await db.get_chat(message.chat.id): total=await bot.get_chat_members_count(message.chat.id) r_j = message.from_user.mention if message.from_user else "Anonymous" await bot.send_message(LOG_CHANNEL, script.LOG_TEXT_G.format(message.chat.title, message.chat.id, total, r_j)) await db.add_chat(message.chat.id, message.chat.title) if message.chat.id in temp.BANNED_CHATS: # Inspired from a boat of a banana tree buttons = [[ InlineKeyboardButton('Support', url=f'https://t.me/{SUPPORT_CHAT}') ]] reply_markup=InlineKeyboardMarkup(buttons) k = await message.reply( text='<b>CHAT NOT ALLOWED 🐞\n\nMy admins has restricted me from working here ! If you want to know more about it contact support..</b>', reply_markup=reply_markup, ) try: await k.pin() except: pass await bot.leave_chat(message.chat.id) return buttons = [[ InlineKeyboardButton('🔸 ᴍᴇꜱꜱᴀɢᴇ ʜᴇʀᴇ 🔹', url="https://t.me/TG_Bots_Supporter") ],[ InlineKeyboardButton('ᴄʜᴀɴɴᴇʟ', url=CHNL_LNK), InlineKeyboardButton('ɢʀᴏᴜᴘ', url=GRP_LNK) ]] reply_markup=InlineKeyboardMarkup(buttons) await message.reply_text( text=f"<b>ᴛʜᴀɴᴋ ʏᴏᴜ ꜰᴏʀ ᴀᴅᴅɪɴɢ ᴍᴇ ɪɴ {message.chat.title} ❣️\n\nᴅᴏɴ'ᴛ ꜰᴏʀɢᴇᴛ ᴛᴏ ᴍᴀᴋᴇ ᴍᴇ ᴀᴅᴍɪɴ. ɪꜰ ʏᴏᴜ ʜᴀᴠᴇ ᴀɴʏ ǫᴜᴇꜱᴛɪᴏɴꜱ & ᴅᴏᴜʙᴛꜱ ᴀʙᴏᴜᴛ ᴜꜱɪɴɢ ᴍᴇ ᴄᴏɴᴛᴀᴄᴛ ꜰʀᴏᴍ ᴀᴅᴍɪɴ & ᴍᴇꜱꜱᴀɢᴇ ʜᴇʀᴇ 👇</b>", reply_markup=reply_markup) else:
"""-----------------------------------------https://t.me/TG_LINKS_CHANNEL--------------------------------------""" @Client.on_message(filters.new_chat_members & filters.group) async def save_group(bot, message): r_j_check = [u.id for u in message.new_chat_members] if temp.ME in r_j_check: if not await db.get_chat(message.chat.id): total=await bot.get_chat_members_count(message.chat.id) r_j = message.from_user.mention if message.from_user else "Anonymous" await bot.send_message(LOG_CHANNEL, script.LOG_TEXT_G.format(message.chat.title, message.chat.id, total, r_j)) await db.add_chat(message.chat.id, message.chat.title) if message.chat.id in temp.BANNED_CHATS: # Inspired from a boat of a banana tree buttons = [[ InlineKeyboardButton('Support', url=f'https://t.me/{SUPPORT_CHAT}') ]] reply_markup=InlineKeyboardMarkup(buttons) k = await message.reply( text='<b>CHAT NOT ALLOWED 🐞\n\nMy admins has restricted me from working here ! If you want to know more about it contact support..</b>', reply_markup=reply_markup, ) try: await k.pin() except: pass await bot.leave_chat(message.chat.id) return buttons = [[ InlineKeyboardButton('🔸 ᴍᴇꜱꜱᴀɢᴇ ʜᴇʀᴇ 🔹', url="https://t.me/TG_Bots_Supporter") ],[ InlineKeyboardButton('ᴄʜᴀɴɴᴇʟ', url=CHNL_LNK), InlineKeyboardButton('ɢʀᴏᴜᴘ', url=GRP_LNK) ]] reply_markup=InlineKeyboardMarkup(buttons) await message.reply_text( text=f"<b>ᴛʜᴀɴᴋ ʏᴏᴜ ꜰᴏʀ ᴀᴅᴅɪɴɢ ᴍᴇ ɪɴ {message.chat.title} ❣️\n\nᴅᴏɴ'ᴛ ꜰᴏʀɢᴇᴛ ᴛᴏ ᴍᴀᴋᴇ ᴍᴇ ᴀᴅᴍɪɴ. ɪꜰ ʏᴏᴜ ʜᴀᴠᴇ ᴀɴʏ ǫᴜᴇꜱᴛɪᴏɴꜱ & ᴅᴏᴜʙᴛꜱ ᴀʙᴏᴜᴛ ᴜꜱɪɴɢ ᴍᴇ ᴄᴏɴᴛᴀᴄᴛ ꜰʀᴏᴍ ᴀᴅᴍɪɴ & ᴍᴇꜱꜱᴀɢᴇ ʜᴇʀᴇ 👇</b>", reply_markup=reply_markup) else:
settings = await get_settings(message.chat.id)
11
2023-11-03 12:21:26+00:00
24k
apple/ml-reed
reed/algorithms/pebble.py
[ { "identifier": "utils", "path": "BPref/utils.py", "snippet": "def make_env(cfg):\ndef ppo_make_env(env_id, seed):\ndef tie_weights(src, trg):\ndef make_metaworld_env(cfg):\ndef ppo_make_metaworld_env(env_id, seed):\n def __init__(self, *models):\n def __enter__(self):\n def __exit__(self, *arg...
import typing as t
import time
import numpy as np
import torch
import hydra
from pathlib import Path
from omegaconf import dictconfig, OmegaConf
from BPref import utils
from BPref.logger import Logger
from BPref.replay_buffer import TrajectoryReplayBuffer
from collections import deque
from reed.models.reward_model import StateActionRewardModel
from reed.data.preference_dataset import PreferenceDataset
from reed.data.preference_data_loader import PreferenceTripletEnsembleDataLoader
from reed.data.preprocess_images import PreProcessInference
20,852
Determine the dimensionality of the inputs to the reward model Args: observation_dim: the dimensionality of agent observations. If the observation is an image, the dimensionality should have the following order: (num_channels, height, width) action_dim: the dimensionality of agent actions Returns: the dimensionality of the reward model's inputs """ # compute the dimensions of the input to the reward function if not self.experiment_config.reward_from_image_observations: return observation_dim + action_dim else: # we need to concatenate the actions to last dimension of the image # the input to the reward net also needs to have the channels first # the image dimensions are given to us a (height, width, channels) sample_shape = list(observation_dim) if self.experiment_config.grayscale_images: num_channels = action_dim + 1 else: num_channels = sample_shape[0] + action_dim # update the number of channels sample_shape[0] = num_channels # the dimensions of the input to the reward model return sample_shape def _determine_reward_observation_dimensions(self) -> t.Union[int, np.ndarray]: """ Check if the reward will use the image observations. If so the reward input shape needs to be set accordingly Returns: the dimensionality of reward's observation space """ if self.experiment_config.reward_from_image_observations: # get a sample image rendering of the environment and get its shape self.env.reset() if "metaworld" in self.experiment_config.env: start_time = time.time() img_obs = self.env.render(camera_name=self.experiment_config.camera_name, resolution=( self.experiment_config.image_height, self.experiment_config.image_width)) end_time = time.time() print(f"Sample render time for metaworld is {end_time - start_time} seconds") else: start_time = time.time() img_obs = self.env.render(mode="rgb_array", height=self.experiment_config.image_height, width=self.experiment_config.image_width) end_time = time.time() print(f"Sample render time for DMC is {end_time - start_time} seconds") formatted_image_observation = self._reward_input_preprocessor.format_state(img_obs).squeeze(axis=0) observation_space = formatted_image_observation.shape print("--------------------------") print("--------------------------") print("--------------------------") print("image observation shape", observation_space) print("--------------------------") print("--------------------------") print("--------------------------") else: observation_space = self.env.observation_space.shape[0] return observation_space def _determine_observation_dimensions(self) -> t.Union[int, np.ndarray]: """ Check if the reward will use the image observations. 
If so the replay buffer needs to be set up to accumulate the image observations Returns: the dimensionality of reward's observation space """ if self.experiment_config.reward_from_image_observations: # get a sample image rendering of the environment and get its shape self.env.reset() if "metaworld" in self.experiment_config.env: start_time = time.time() img_obs = self.env.render(camera_name=self.experiment_config.camera_name, resolution=( self.experiment_config.image_height, self.experiment_config.image_width)) end_time = time.time() print(f"Sample render time for metaworld is {end_time - start_time} seconds") else: start_time = time.time() img_obs = self.env.render(mode="rgb_array", height=self.experiment_config.image_height, width=self.experiment_config.image_width) end_time = time.time() print(f"Sample render time for DMC is {end_time - start_time} seconds") observation_space = img_obs.shape print("--------------------------") print("--------------------------") print("--------------------------") print("image observation shape", observation_space) print("--------------------------") print("--------------------------") print("--------------------------") else: observation_space = self.env.observation_space.shape[0] return observation_space def _render_image_observation(self) -> np.ndarray: """ Render the current image observation """ if "metaworld" in self.experiment_config.env: img_obs = self.env.render(camera_name=self.experiment_config.camera_name, resolution=( self.experiment_config.image_height, self.experiment_config.image_width)) else: img_obs = self.env.render(mode="rgb_array", height=self.experiment_config.image_height, width=self.experiment_config.image_width) return img_obs
# # For licensing see accompanying LICENSE file. # Copyright (C) 2023 Apple Inc. All Rights Reserved. # class PEBBLE: """ Train a reward model in conjunction with policy training following the PEBBLE algorithm from (Lee et al. 2021) """ def __init__(self, experiment_config: dictconfig.DictConfig): """ Args: experiment_config: contains the configuration for the experiment to be run. Access like a dictionry """ # track the experimental configuration self.experiment_config = experiment_config # create the logger to track policy learning progress self.logger = Logger( self.experiment_config.out_dir, save_tb=self.experiment_config.log_save_tb, log_frequency=self.experiment_config.log_frequency, agent=self.experiment_config.agent.name) # used to track where we are in training # total amount of feedback the reward model has solicited self.total_feedback = 0 # total amount of feedback given to the reward model self.labeled_feedback = 0 # policy train step self.step = 0 # we need to set the random seed for replication purposes utils.set_seed_everywhere(self.experiment_config.seed) # the device on which models will be trained self.device = torch.device(self.experiment_config.device) # flag to make sure we are handling multi-gpu training where we need to self.multi_gpu = torch.cuda.device_count() > 1 print("----------------------------------------") print("----------------------------------------") print("----------------------------------------") print("----------------------------------------") print(f"There is {torch.cuda.device_count()} GPU, so models will be trained with torch.nn.DataParallel.") print("----------------------------------------") print("----------------------------------------") print("----------------------------------------") print("----------------------------------------") # make the environment if 'metaworld' in self.experiment_config.env: self.env = utils.make_metaworld_env(self.experiment_config) # we are not evaluating a domain where we need to log whether an agent has reached a goal state self.log_success = True else: self.env = utils.make_env(self.experiment_config) # we are not evaluating a domain where we need to log whether an agent has reached a goal state self.log_success = False print('----------------------') print('----------------------') print('----------------------') print('----------------------') print("observation space ", self.env.observation_space.shape[0]) print("action space ", self.env.action_space.shape[0]) print('----------------------') print('----------------------') print('----------------------') print('----------------------') # we need to set the policy's observation and action space self.experiment_config.agent.params.obs_dim = self.env.observation_space.shape[0] self.experiment_config.agent.params.action_dim = self.env.action_space.shape[0] self.experiment_config.agent.params.action_range = [ float(self.env.action_space.low.min()), float(self.env.action_space.high.max()) ] # create the agent specified in the configuration self.agent = hydra.utils.instantiate(self.experiment_config.agent) # the class that will format the observations and observation action pairs for consumption by the reward model self._reward_input_preprocessor = PreProcessInference( image_observations=self.experiment_config.reward_from_image_observations, grayscale_images=self.experiment_config.grayscale_images, normalize_images=self.experiment_config.normalized_images) # determine the reward's observation space # if the reward is trained on images then the reward's 
observation space differs from the policy's, which is # trained on the state space self._observation_dimensionality = self._determine_observation_dimensions() self._reward_observation_dimensionality = self._determine_reward_observation_dimensions() # create the agent's replay buffer setting if image observations will need to be tracked self.replay_buffer = TrajectoryReplayBuffer( int(self.experiment_config.replay_buffer_capacity), self.device, image_observations=(self._observation_dimensionality if (self.experiment_config.reward_from_image_observations or self.experiment_config.save_image_observations) else None) ) # determine the dimensionality of the input to the reward function self.reward_in_dim = self._determine_reward_input_dimensions( observation_dim=self._reward_observation_dimensionality, action_dim=self.env.action_space.shape[0]) # instantiating the reward model self.reward_model = self.construct_reward_ensemble() # create the preference dataset that will solicit and hold labelled preference triplets self.preference_dataset = PreferenceDataset( observation_dim=self._reward_observation_dimensionality, action_dim=self.env.action_space.shape[0], capacity=self.experiment_config.preference_dataset_capacity, size_segment=self.experiment_config.segment_size, out_path=Path("/tmp/preference_dataset/"), image_observations=self.experiment_config.reward_from_image_observations, state_action_formatter=self._reward_input_preprocessor, grayscale_images=self.experiment_config.grayscale_images, collect_image_pref_dataset=self.experiment_config.save_image_observations, teacher_beta=self.experiment_config.teacher_beta, teacher_gamma=self.experiment_config.teacher_gamma, teacher_eps_mistake=self.experiment_config.teacher_eps_mistake, teacher_eps_skip=self.experiment_config.teacher_eps_skip, teacher_eps_equal=self.experiment_config.teacher_eps_equal ) # save the experimental configuration with open(Path(self.experiment_config.out_dir) / "experiment_config.yaml", "w+") as f: OmegaConf.save(config=self.experiment_config, f=f) def _determine_reward_input_dimensions(self, observation_dim: t.Union[int, np.ndarray], action_dim: int) -> t.Union[int, t.Sequence]: """ Determine the dimensionality of the inputs to the reward model Args: observation_dim: the dimensionality of agent observations. If the observation is an image, the dimensionality should have the following order: (num_channels, height, width) action_dim: the dimensionality of agent actions Returns: the dimensionality of the reward model's inputs """ # compute the dimensions of the input to the reward function if not self.experiment_config.reward_from_image_observations: return observation_dim + action_dim else: # we need to concatenate the actions to last dimension of the image # the input to the reward net also needs to have the channels first # the image dimensions are given to us a (height, width, channels) sample_shape = list(observation_dim) if self.experiment_config.grayscale_images: num_channels = action_dim + 1 else: num_channels = sample_shape[0] + action_dim # update the number of channels sample_shape[0] = num_channels # the dimensions of the input to the reward model return sample_shape def _determine_reward_observation_dimensions(self) -> t.Union[int, np.ndarray]: """ Check if the reward will use the image observations. 
If so the reward input shape needs to be set accordingly Returns: the dimensionality of reward's observation space """ if self.experiment_config.reward_from_image_observations: # get a sample image rendering of the environment and get its shape self.env.reset() if "metaworld" in self.experiment_config.env: start_time = time.time() img_obs = self.env.render(camera_name=self.experiment_config.camera_name, resolution=( self.experiment_config.image_height, self.experiment_config.image_width)) end_time = time.time() print(f"Sample render time for metaworld is {end_time - start_time} seconds") else: start_time = time.time() img_obs = self.env.render(mode="rgb_array", height=self.experiment_config.image_height, width=self.experiment_config.image_width) end_time = time.time() print(f"Sample render time for DMC is {end_time - start_time} seconds") formatted_image_observation = self._reward_input_preprocessor.format_state(img_obs).squeeze(axis=0) observation_space = formatted_image_observation.shape print("--------------------------") print("--------------------------") print("--------------------------") print("image observation shape", observation_space) print("--------------------------") print("--------------------------") print("--------------------------") else: observation_space = self.env.observation_space.shape[0] return observation_space def _determine_observation_dimensions(self) -> t.Union[int, np.ndarray]: """ Check if the reward will use the image observations. If so the replay buffer needs to be set up to accumulate the image observations Returns: the dimensionality of reward's observation space """ if self.experiment_config.reward_from_image_observations: # get a sample image rendering of the environment and get its shape self.env.reset() if "metaworld" in self.experiment_config.env: start_time = time.time() img_obs = self.env.render(camera_name=self.experiment_config.camera_name, resolution=( self.experiment_config.image_height, self.experiment_config.image_width)) end_time = time.time() print(f"Sample render time for metaworld is {end_time - start_time} seconds") else: start_time = time.time() img_obs = self.env.render(mode="rgb_array", height=self.experiment_config.image_height, width=self.experiment_config.image_width) end_time = time.time() print(f"Sample render time for DMC is {end_time - start_time} seconds") observation_space = img_obs.shape print("--------------------------") print("--------------------------") print("--------------------------") print("image observation shape", observation_space) print("--------------------------") print("--------------------------") print("--------------------------") else: observation_space = self.env.observation_space.shape[0] return observation_space def _render_image_observation(self) -> np.ndarray: """ Render the current image observation """ if "metaworld" in self.experiment_config.env: img_obs = self.env.render(camera_name=self.experiment_config.camera_name, resolution=( self.experiment_config.image_height, self.experiment_config.image_width)) else: img_obs = self.env.render(mode="rgb_array", height=self.experiment_config.image_height, width=self.experiment_config.image_width) return img_obs
def construct_reward_ensemble(self) -> StateActionRewardModel:
3
2023-11-06 23:14:20+00:00
24k
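In the PEBBLE record above, _determine_reward_input_dimensions sizes the reward network input for image observations by adding one channel per action dimension (channels first: num_channels = image_channels + action_dim). A hedged numpy sketch of that channel arithmetic — the repository's real formatting lives in PreProcessInference and may differ in detail:

import numpy as np

def concat_action_channels(image_chw: np.ndarray, action: np.ndarray) -> np.ndarray:
    # Tile each action component into an HxW plane and append it to the image
    # channels, matching the num_channels arithmetic in the record above.
    c, h, w = image_chw.shape
    planes = np.broadcast_to(action[:, None, None], (action.shape[0], h, w))
    return np.concatenate([image_chw, planes], axis=0)

obs = np.zeros((3, 64, 64), dtype=np.float32)   # channels-first RGB observation
act = np.array([0.5, -0.25], dtype=np.float32)  # 2-dimensional action
print(concat_action_channels(obs, act).shape)   # (5, 64, 64)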
allenai/unified-io-2
t5x/train.py
[ { "identifier": "PackingStrategy", "path": "t5x/examples/unified_io/packing.py", "snippet": "class PackingStrategy:\n \"\"\"Defines how to pack data during training and handles batch-level constraints\n from the input/target encoders\"\"\"\n\n pack_max_len: Optional[Tuple[int, int]] = None\n \"\"...
import functools
import math
import os
import time
import warnings
import clu.data
import jax
import jax.numpy as jnp
import numpy as np
import seqio
import tensorflow as tf
import jax.profiler
import gin
from typing import Callable, Sequence, Mapping, Tuple, Type, Optional
from t5x.examples.unified_io.packing import PackingStrategy
from absl import logging
from clu import metric_writers
from jax import random
from jax.experimental import multihost_utils
from jax.experimental.global_device_array import GlobalDeviceArray
from t5x import checkpoints
from t5x import eval as eval_lib
from t5x import models
from t5x.examples.unified_io import evaluator
from t5x import partitioning
from t5x import train_state as train_state_lib
from t5x import trainer as trainer_lib
from t5x import utils
from os.path import expanduser
from t5x.examples.unified_io.utils import init_wandb
from t5x.examples.unified_io.metrics.metrics import null_metric
from t5x.examples.unified_io.data.postprocessing import return_example
from absl import app
from absl import flags
from t5x import gin_utils
15,059
metrics_by_task: A map of metrics keyed by task name. Returns: A bool indicating whether training should be halted. Raises: RuntimeError: When the metrics processed on host 0 is None. """ stop_training = False if jax.process_index() == 0: if not metrics_by_task: raise RuntimeError('Metric is unexpectedly empty on process 0') for action in actions.get(mode, []): stop_training |= action.run(train_state, metrics_by_task=metrics_by_task) # Broadcast result from host 0 to others. return bool(multihost_utils.broadcast_one_to_all(jnp.array(stop_training))) def train( *, model: models.BaseTransformerModel, train_dataset_cfg: utils.DatasetConfig, train_eval_dataset_cfg: Optional[utils.DatasetConfig], infer_eval_dataset_cfg: Optional[utils.DatasetConfig], checkpoint_cfg: utils.CheckpointConfig, partitioner: partitioning.BasePartitioner, trainer_cls: trainer_lib.BaseTrainerConstructor, model_dir: str, total_steps: int, eval_steps: int, eval_period: int, stats_period: Optional[int] = None, random_seed: Optional[int], use_hardware_rng: bool = False, summarize_config_fn: Callable[[str, metric_writers.MetricWriter, int], None], inference_evaluator_cls: utils.EvaluatorConstructor = seqio.Evaluator, get_dataset_fn: utils.GetDatasetCallable = utils.get_dataset, concurrent_metrics: bool = True, actions: Optional[Mapping[str, Sequence[trainer_lib.BaseAction]]] = None, train_eval_get_dataset_fn: utils.GetEvalDatasetCallable = utils .get_training_eval_datasets, run_eval_before_training: bool = False, use_wandb = True, weight_metrics="norm", packing_strategy: PackingStrategy = None, train_state_initializer_cls: Type[ utils.TrainStateInitializer] = utils.TrainStateInitializer, use_gda: bool = True, verify_matching_vocabs_fn: Optional[ Callable[[utils.DatasetConfig, models.BaseTransformerModel], None]] = utils.verify_matching_vocabs, shuffle_buffer_size=None, cycle_length=None, block_length=None ) -> Tuple[int, train_state_lib.TrainState]: """Train function. Args: model: The model object to use for training. train_dataset_cfg: Specification for the dataset to train with. train_eval_dataset_cfg: Specification for the dataset to evaluate with using the train metrics and no inference (e.g., uses teacher forcing). If None, train eval is disabled. infer_eval_dataset_cfg: Specification for the dataset to evaluate with using the inference metrics (e.g., uses sampled decoding). If None, inference eval is disabled. checkpoint_cfg: Specification for saving and restoring model parameters and dataset state to/from checkpoints. partitioner: Partitioner for model parameters and data across devices. trainer_cls: An implementation of BaseTrainer. model_dir: Path of directory to store checkpoints and metric summaries. total_steps: The step number to stop training after. The number of actual steps trained in this run will be this number minus the starting step from the checkpoint. If this is set to the starting step from the checkpoint, the model will not be compiled for training and training will not be run. This can be used in conjunction with `run_eval_before_training` to only evaluate a model. eval_steps: The number of batches to process for each train-eval loop. eval_period: The number of train steps between each evaluation (both train-eval and infer-eval). stats_period: The number of train steps between writing scalar stats. If None, defaults to eval_period. random_seed: A random seed to use for dropout and initialization. If None, a fast, non-deterministic hardware-based RNG is used. 
use_hardware_rng: Whether to force using the RngBitGenerator based hardware rng, which takes seeds and acts similarly to software PRNG in that it should be seed-deterministic. The new RngBitGenerator custom PRNG system should be reproducible for a given sharding, but the numbers will change for different shardings of the same model. summarize_config_fn: A function that takes in the model directory, a SummaryWriter, and the step number, and writes a summary of the inference_evaluator_cls: seqio.Evaluator class to use for inference evaluation, potentially with bound configuration args. get_dataset_fn: The callable use to get the train and train-eval datasets based on the DatasetConfig and shard information. concurrent_metrics: If True, allow metrics computation and logging to overlap with training. Will likely result in additional TPU memory usage. actions: A mapping of actions that runs after train, eval or infer_eval, to inspect the model and perform useful operations, e.g., early stopping. The key must have a 1:1 mapping to ActionMode enum. For EVAL actions to actually work, this requires `concurrent_metrics` to be turned off, since chaining futures and mutating states concurrently might be error-prone. train_eval_get_dataset_fn: Optional callable use to get the train-eval datasets based on the DatasetConfig and shard information. If missing, it defaults to `utils.get_training_eval_datasets`. run_eval_before_training: If True, calculate training eval and inference eval metrics before training begins. train_state_initializer_cls: t5x.utils.TrainStateInitializer class for initializing partitioned TrainState from checkpoints or scratch. use_gda: if True, uses GlobalDeviceArray. Experimental feature. verify_matching_vocabs_fn: Function to validate whether the task vocabulary matches the model vocabulary. Should raise an exception on error. Returns: The tuple of (last_step, last_train_state). """ if jax.process_index() == 0 and use_wandb: if not os.environ.get("WANDB_API_KEY"): use_wandb = False logging.warning("WANDB_API_KEY not found, wandb will not be used") else:
# Copyright 2022 The T5X Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r"""Script to pretrain or finetune in JAX using a SeqIO pipeline. """ # Set Linen to add profiling information when constructing Modules. # Must be set before flax imports. # pylint:disable=g-import-not-at-top os.environ['FLAX_PROFILE'] = 'true' # TODO(adarob): Re-enable once users are notified and tests are updated. os.environ['FLAX_LAZY_RNG'] = 'no' os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = os.path.join( expanduser("~"), ".config/gcloud/application_default_credentials.json") # Automatically search for gin files relative to the T5X package. _DEFAULT_GIN_SEARCH_PATHS = [ os.path.dirname(os.path.dirname(os.path.abspath(__file__))) ] PyTreeDef = type(jax.tree_util.tree_structure(None)) P = partitioning.PartitionSpec # Special key that used to distinguish train metrics. TRAIN_METRIC_KEY = 'train' # String keys that is acceptable from config. _ACTION_KEYS = frozenset(trainer_lib.ActionMode.__members__.keys()) def run_actions( mode: trainer_lib.ActionMode, actions: trainer_lib.ActionMapType, train_state: train_state_lib.TrainState, metrics_by_task: Mapping[str, trainer_lib.MetricValueMapType]) -> bool: """Invokes all actions on the given mode on host 0, then broadcasts to all. Args: mode: The mode to run the actions. e.g., if mode is `train`, only actions configured to run with `train` mode will be invoked. actions: A mapping of actions that runs after train, eval or infer_eval, to inspect the model and perform useful operations, e.g., early stopping. train_state: The current train_state of the trainer. metrics_by_task: A map of metrics keyed by task name. Returns: A bool indicating whether training should be halted. Raises: RuntimeError: When the metrics processed on host 0 is None. """ stop_training = False if jax.process_index() == 0: if not metrics_by_task: raise RuntimeError('Metric is unexpectedly empty on process 0') for action in actions.get(mode, []): stop_training |= action.run(train_state, metrics_by_task=metrics_by_task) # Broadcast result from host 0 to others. 
return bool(multihost_utils.broadcast_one_to_all(jnp.array(stop_training))) def train( *, model: models.BaseTransformerModel, train_dataset_cfg: utils.DatasetConfig, train_eval_dataset_cfg: Optional[utils.DatasetConfig], infer_eval_dataset_cfg: Optional[utils.DatasetConfig], checkpoint_cfg: utils.CheckpointConfig, partitioner: partitioning.BasePartitioner, trainer_cls: trainer_lib.BaseTrainerConstructor, model_dir: str, total_steps: int, eval_steps: int, eval_period: int, stats_period: Optional[int] = None, random_seed: Optional[int], use_hardware_rng: bool = False, summarize_config_fn: Callable[[str, metric_writers.MetricWriter, int], None], inference_evaluator_cls: utils.EvaluatorConstructor = seqio.Evaluator, get_dataset_fn: utils.GetDatasetCallable = utils.get_dataset, concurrent_metrics: bool = True, actions: Optional[Mapping[str, Sequence[trainer_lib.BaseAction]]] = None, train_eval_get_dataset_fn: utils.GetEvalDatasetCallable = utils .get_training_eval_datasets, run_eval_before_training: bool = False, use_wandb = True, weight_metrics="norm", packing_strategy: PackingStrategy = None, train_state_initializer_cls: Type[ utils.TrainStateInitializer] = utils.TrainStateInitializer, use_gda: bool = True, verify_matching_vocabs_fn: Optional[ Callable[[utils.DatasetConfig, models.BaseTransformerModel], None]] = utils.verify_matching_vocabs, shuffle_buffer_size=None, cycle_length=None, block_length=None ) -> Tuple[int, train_state_lib.TrainState]: """Train function. Args: model: The model object to use for training. train_dataset_cfg: Specification for the dataset to train with. train_eval_dataset_cfg: Specification for the dataset to evaluate with using the train metrics and no inference (e.g., uses teacher forcing). If None, train eval is disabled. infer_eval_dataset_cfg: Specification for the dataset to evaluate with using the inference metrics (e.g., uses sampled decoding). If None, inference eval is disabled. checkpoint_cfg: Specification for saving and restoring model parameters and dataset state to/from checkpoints. partitioner: Partitioner for model parameters and data across devices. trainer_cls: An implementation of BaseTrainer. model_dir: Path of directory to store checkpoints and metric summaries. total_steps: The step number to stop training after. The number of actual steps trained in this run will be this number minus the starting step from the checkpoint. If this is set to the starting step from the checkpoint, the model will not be compiled for training and training will not be run. This can be used in conjunction with `run_eval_before_training` to only evaluate a model. eval_steps: The number of batches to process for each train-eval loop. eval_period: The number of train steps between each evaluation (both train-eval and infer-eval). stats_period: The number of train steps between writing scalar stats. If None, defaults to eval_period. random_seed: A random seed to use for dropout and initialization. If None, a fast, non-deterministic hardware-based RNG is used. use_hardware_rng: Whether to force using the RngBitGenerator based hardware rng, which takes seeds and acts similarly to software PRNG in that it should be seed-deterministic. The new RngBitGenerator custom PRNG system should be reproducible for a given sharding, but the numbers will change for different shardings of the same model. 
summarize_config_fn: A function that takes in the model directory, a SummaryWriter, and the step number, and writes a summary of the configuration using the writer. inference_evaluator_cls: seqio.Evaluator class to use for inference evaluation, potentially with bound configuration args. get_dataset_fn: The callable used to get the train and train-eval datasets based on the DatasetConfig and shard information. concurrent_metrics: If True, allow metrics computation and logging to overlap with training. Will likely result in additional TPU memory usage. actions: A mapping of actions that run after train, eval or infer_eval, to inspect the model and perform useful operations, e.g., early stopping. The key must have a 1:1 mapping to ActionMode enum. For EVAL actions to actually work, this requires `concurrent_metrics` to be turned off, since chaining futures and mutating states concurrently might be error-prone. train_eval_get_dataset_fn: Optional callable used to get the train-eval datasets based on the DatasetConfig and shard information. If missing, it defaults to `utils.get_training_eval_datasets`. run_eval_before_training: If True, calculate training eval and inference eval metrics before training begins. train_state_initializer_cls: t5x.utils.TrainStateInitializer class for initializing partitioned TrainState from checkpoints or scratch. use_gda: If True, uses GlobalDeviceArray. Experimental feature. verify_matching_vocabs_fn: Function to validate whether the task vocabulary matches the model vocabulary. Should raise an exception on error. use_wandb: If True, log metrics to Weights & Biases when a WANDB_API_KEY is available. Returns: The tuple of (last_step, last_train_state). """ if jax.process_index() == 0 and use_wandb: if not os.environ.get("WANDB_API_KEY"): use_wandb = False logging.warning("WANDB_API_KEY not found, wandb will not be used") else:
init_wandb()
9
2023-12-12 20:23:33+00:00
24k
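The run_actions code in the record above follows a decide-on-host-0-then-broadcast pattern: only process 0 inspects the metrics, and its boolean decision is replicated to every host via jax.experimental.multihost_utils. Below is a minimal, self-contained sketch of that pattern; the loss-threshold rule is a hypothetical stand-in for the configured actions, not T5X's actual logic.

import jax
import jax.numpy as jnp
from jax.experimental import multihost_utils

def should_stop_training(metrics_by_task) -> bool:
    stop = False
    if jax.process_index() == 0:
        # Only process 0 looks at the metrics and makes the decision.
        stop = any(m.get("loss", 0.0) > 1e3 for m in metrics_by_task.values())
    # All processes receive process 0's decision as a replicated array.
    return bool(multihost_utils.broadcast_one_to_all(jnp.array(stop)))

print(should_stop_training({"my_task": {"loss": 0.5}}))  # False on a single host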
alibaba/animate-anything
train.py
[ { "identifier": "VideoJsonDataset", "path": "utils/dataset.py", "snippet": "class VideoJsonDataset(Dataset):\n def __init__(\n self,\n tokenizer=None,\n width: int = 256,\n height: int = 256,\n n_sample_frames: int = 16,\n fps: int = 8,\n video_dir: st...
import argparse import datetime import logging import inspect import math import os import json import gc import copy import random import cv2 import torch import torch.nn.functional as F import torch.utils.checkpoint import torchvision.transforms as T import diffusers import transformers import numpy as np import imageio import itertools import bitsandbytes as bnb from typing import Dict, Optional, Tuple from omegaconf import OmegaConf from tqdm.auto import tqdm from PIL import Image from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers.models import AutoencoderKL from diffusers import DPMSolverMultistepScheduler, DDPMScheduler from diffusers.image_processor import VaeImageProcessor from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version, export_to_video from diffusers.utils.import_utils import is_xformers_available from diffusers.models.attention_processor import AttnProcessor2_0, Attention from diffusers.models.attention import BasicTransformerBlock from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth import tensor2vid from transformers import CLIPTextModel, CLIPTokenizer from transformers.models.clip.modeling_clip import CLIPEncoder from utils.dataset import VideoJsonDataset, SingleVideoDataset, \ ImageDataset, VideoFolderDataset, CachedDataset, VideoBLIPDataset from einops import rearrange, repeat from models.unet_3d_condition_mask import UNet3DConditionModel from models.pipeline import LatentToVideoPipeline from utils.lora_handler import LoraHandler, LORA_VERSIONS from utils.common import read_mask, generate_random_mask, slerp, calculate_motion_score, \ read_video, calculate_motion_precision, calculate_latent_motion_score, \ DDPM_forward, DDPM_forward_timesteps, DDPM_forward_mask, motion_mask_loss, \ generate_center_mask, tensor_to_vae_latent from xformers.ops import MemoryEfficientAttentionFlashAttentionOp
17,639
already_printed_trainables = False logger = get_logger(__name__, log_level="INFO") def create_logging(logging, logger, accelerator): logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) def accelerate_set_verbose(accelerator): if accelerator.is_local_main_process: transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() def get_train_dataset(dataset_types, train_data, tokenizer): train_datasets = []
already_printed_trainables = False logger = get_logger(__name__, log_level="INFO") def create_logging(logging, logger, accelerator): logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) def accelerate_set_verbose(accelerator): if accelerator.is_local_main_process: transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() def get_train_dataset(dataset_types, train_data, tokenizer): train_datasets = []
dataset_cls = [VideoJsonDataset, SingleVideoDataset, ImageDataset, VideoFolderDataset, VideoBLIPDataset]
5
2023-12-07 08:26:29+00:00
24k
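The next_line of the record above (dataset_cls = [VideoJsonDataset, ...]) suggests get_train_dataset dispatches each requested type string to a dataset class. A minimal, self-contained sketch of that dispatch is below; the class stubs and key names are hypothetical stand-ins, not the real utils.dataset implementations.

class VideoJsonDataset:
    def __init__(self, **kwargs):
        self.kwargs = kwargs  # stand-in: the real class builds a video dataset

class VideoFolderDataset:
    def __init__(self, **kwargs):
        self.kwargs = kwargs

DATASET_CLS = {"json": VideoJsonDataset, "folder": VideoFolderDataset}  # hypothetical keys

def get_train_dataset(dataset_types, train_data):
    # One dataset instance per requested type string.
    return [DATASET_CLS[t](**train_data) for t in dataset_types]

print(get_train_dataset(["json"], {"width": 256}))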
allenai/Holodeck
modules/object_selector.py
[ { "identifier": "DFS_Solver_Floor", "path": "modules/floor_objects.py", "snippet": "class DFS_Solver_Floor():\n def __init__(self, grid_size, random_seed=0, max_duration=5, constraint_bouns=0.2):\n self.grid_size = grid_size\n self.random_seed = random_seed\n self.max_duration = ...
import re import copy import json import torch import random import multiprocessing import torch.nn.functional as F import modules.prompts as prompts from typing import Dict from colorama import Fore from shapely import Polygon from langchain import PromptTemplate from modules.floor_objects import DFS_Solver_Floor from modules.wall_objects import DFS_Solver_Wall
16,351
valid_candidates = [] for candidate in candidates: dimension = self.database[candidate[0]]["assetMetadata"]["boundingBox"] size = [dimension["x"], dimension["y"], dimension["z"]] if size[2] > size[0]: size = [size[2], size[1], size[0]] # make sure that x > z if size[0] > room_size[0] * self.object_size_tolerance: continue if size[1] > room_size[1] * self.object_size_tolerance: continue if size[2] > room_size[2] * self.object_size_tolerance: continue if size[0] * size[2] > room_size[0] * room_size[2] * 0.5: continue # TODO: consider using the floor area instead of the room area valid_candidates.append(candidate) return valid_candidates def check_thin_object(self, candidates): valid_candidates = [] for candidate in candidates: dimension = self.database[candidate[0]]["assetMetadata"]["boundingBox"] size = [dimension["x"], dimension["y"], dimension["z"]] if size[2] > min(size[0], size[1]) * self.thin_threshold: continue valid_candidates.append(candidate) return valid_candidates def random_select(self, candidates): if self.random_selection: selected_candidate = random.choice(candidates) else: scores = [candidate[1] for candidate in candidates] scores_tensor = torch.Tensor(scores) probas = F.softmax(scores_tensor, dim=0) # TODO: consider using normalized scores selected_index = torch.multinomial(probas, 1).item() selected_candidate = candidates[selected_index] return selected_candidate def update_floor_capacity(self, room2floor_capacity, scene): for room in scene["rooms"]: room_vertices = room["vertices"] room_poly = Polygon(room_vertices) for door in scene["doors"]: for door_vertices in door["doorBoxes"]: door_poly = Polygon(door_vertices) door_center = door_poly.centroid door_area = door_poly.area if room_poly.contains(door_center): room2floor_capacity[room["id"]][1] += door_area * 0.6 if scene["open_walls"] != []: for open_wall_vertices in scene["open_walls"]["openWallBoxes"]: open_wall_poly = Polygon(open_wall_vertices) open_wall_center = open_wall_poly.centroid if room_poly.contains(open_wall_center): room2floor_capacity[room["id"]][1] += open_wall_poly.area * 0.6 return room2floor_capacity def update_wall_capacity(self, room2wall_capacity, scene): for room in scene["rooms"]: room_vertices = room["vertices"] room_poly = Polygon(room_vertices) for window in scene["windows"]: for window_vertices in window["windowBoxes"]: window_poly = Polygon(window_vertices) window_center = window_poly.centroid window_x = window_poly.bounds[2] - window_poly.bounds[0] window_y = window_poly.bounds[3] - window_poly.bounds[1] window_width = max(window_x, window_y) if room_poly.contains(window_center): room2wall_capacity[room["id"]][1] += window_width * 0.6 if scene["open_walls"] != []: for open_wall_vertices in scene["open_walls"]["openWallBoxes"]: open_wall_poly = Polygon(open_wall_vertices) open_wall_center = open_wall_poly.centroid open_wall_x = open_wall_poly.bounds[2] - open_wall_poly.bounds[0] open_wall_y = open_wall_poly.bounds[3] - open_wall_poly.bounds[1] open_wall_width = max(open_wall_x, open_wall_y) if room_poly.contains(open_wall_center): room2wall_capacity[room["id"]][1] += open_wall_width * 0.6 return room2wall_capacity def check_floor_placement(self, candidates, room_vertices, scene): room_x = max([vertex[0] for vertex in room_vertices]) - min([vertex[0] for vertex in room_vertices]) room_z = max([vertex[1] for vertex in room_vertices]) - min([vertex[1] for vertex in room_vertices]) grid_size = int(max(room_x // 20, room_z // 20)) solver = DFS_Solver_Floor(grid_size=grid_size) room_poly = 
Polygon(room_vertices) initial_state = self.get_initial_state_floor(room_vertices, scene, add_window=False) grid_points = solver.create_grids(room_poly) grid_points = solver.remove_points(grid_points, initial_state) valid_candidates = [] for candidate in candidates: object_size = self.database[candidate[0]]["assetMetadata"]["boundingBox"] object_dim = (object_size["x"]*100 + self.size_buffer, object_size["z"]*100 + self.size_buffer) solutions = solver.get_all_solutions(room_poly, grid_points, object_dim) solutions = solver.filter_collision(initial_state, solutions) solutions = solver.place_edge(room_poly, solutions, object_dim) if solutions != []: valid_candidates.append(candidate) else: print(f"Floor Object {candidate[0]} (size: {object_dim}) cannot be placed in room"); continue return valid_candidates def check_wall_placement(self, candidates, room_vertices, scene): room_x = max([vertex[0] for vertex in room_vertices]) - min([vertex[0] for vertex in room_vertices]) room_z = max([vertex[1] for vertex in room_vertices]) - min([vertex[1] for vertex in room_vertices]) grid_size = int(max(room_x // 20, room_z // 20))
class ObjectSelector: def __init__(self, object_retriever, llm): # object retriever self.object_retriever = object_retriever self.database = object_retriever.database # language model and prompt templates self.llm = llm self.object_selection_template_1 = prompts.object_selection_prompt_new_1 self.object_selection_template_2 = PromptTemplate(input_variables=["object_selection_prompt_new_1", "object_selection_1", "room"], template=prompts.object_selection_prompt_new_2) # hyperparameters self.floor_capacity_ratio = 0.4 self.wall_capacity_ratio = 0.5 self.object_size_tolerance = 0.8 self.similarity_threshold_floor = 31 # need to be tuned self.similarity_threshold_wall = 31 # need to be tuned self.thin_threshold = 3 self.used_assets = [] self.consider_size = True self.size_buffer = 10 self.random_selection = False self.reuse_selection = False self.multiprocessing = True def select_objects(self, scene, additional_requirements="N/A"): rooms_types = [room["roomType"] for room in scene["rooms"]] room2area = {room["roomType"]: self.get_room_area(room) for room in scene["rooms"]} room2size = {room["roomType"]: self.get_room_size(room, scene["wall_height"]) for room in scene["rooms"]} room2perimeter = {room["roomType"]: self.get_room_perimeter(room) for room in scene["rooms"]} room2vertices = {room["roomType"]: [(x * 100, y * 100) for (x, y) in room["vertices"]] for room in scene["rooms"]} room2floor_capacity = {room_type: [room_area * self.floor_capacity_ratio, 0] for room_type, room_area in room2area.items()} room2floor_capacity = self.update_floor_capacity(room2floor_capacity, scene) room2wall_capacity = {room_type: [room_perimeter * self.wall_capacity_ratio, 0] for room_type, room_perimeter in room2perimeter.items()} selected_objects = {room["roomType"]: {"floor": [], "wall": []} for room in scene["rooms"]} if "object_selection_plan" in scene: object_selection_plan = scene["object_selection_plan"] if self.reuse_selection: selected_objects = scene["selected_objects"] else: for room_type in rooms_types: floor_objects, _, wall_objects, _ = self.get_objects_by_room(object_selection_plan[room_type], scene, room2size[room_type], room2floor_capacity[room_type], room2wall_capacity[room_type], room2vertices[room_type]) selected_objects[room_type]["floor"] = floor_objects selected_objects[room_type]["wall"] = wall_objects else: object_selection_plan = {room["roomType"]: [] for room in scene["rooms"]} packed_args = [(room_type, scene, additional_requirements, room2size, room2floor_capacity, room2wall_capacity, room2vertices) for room_type in rooms_types] if self.multiprocessing: pool = multiprocessing.Pool(processes=4) results = pool.map(self.plan_room, packed_args) pool.close() pool.join() else: results = [self.plan_room(args) for args in packed_args] for room_type, result in results: selected_objects[room_type]["floor"] = result["floor"] selected_objects[room_type]["wall"] = result["wall"] object_selection_plan[room_type] = result["plan"] print(f"\n{Fore.GREEN}AI: Here is the object selection plan:\n{object_selection_plan}{Fore.RESET}") return object_selection_plan, selected_objects def plan_room(self, args): room_type, scene, additional_requirements, room2size, room2floor_capacity, room2wall_capacity, room2vertices = args print(f"\n{Fore.GREEN}AI: Selecting objects for {room_type}...{Fore.RESET}\n") result = {} room_size_str = f"{int(room2size[room_type][0])*100}cm in length, {int(room2size[room_type][1])*100}cm in width, {int(room2size[room_type][2])*100}cm in height" prompt_1 = 
self.object_selection_template_1.replace("INPUT", scene["query"]).replace("ROOM_TYPE", room_type).replace("ROOM_SIZE", room_size_str).replace("REQUIREMENTS", additional_requirements) # print(f"\nUser: {prompt_1}\n") output_1 = self.llm(prompt_1).lower() plan_1 = self.extract_json(output_1) if plan_1 is None: print(f"Error while extracting the JSON for {room_type}.") return result floor_objects, floor_capacity, wall_objects, wall_capacity = self.get_objects_by_room(plan_1, scene, room2size[room_type], room2floor_capacity[room_type], room2wall_capacity[room_type], room2vertices[room_type]) if floor_capacity[1] / floor_capacity[0] >= 0.8: result["floor"] = floor_objects result["wall"] = wall_objects result["plan"] = plan_1 else: print(f"{Fore.RED}AI: The floor capacity of {room_type} is {floor_capacity[1]}m^2, which is less than 70% of the total floor capacity {floor_capacity[0]}m^2.{Fore.RESET}") prompt_2 = self.object_selection_template_2.format(object_selection_prompt_new_1=prompt_1, object_selection_1=output_1, room=room_type) output_2 = self.llm(prompt_2).lower() plan_2 = self.extract_json(output_2) new_plan = copy.deepcopy(plan_1) for object in plan_2: new_plan[object] = plan_2[object] floor_objects, _, wall_objects, _ = self.get_objects_by_room(new_plan, scene, room2size[room_type], room2floor_capacity[room_type], room2wall_capacity[room_type], room2vertices[room_type]) result["floor"] = floor_objects result["wall"] = wall_objects result["plan"] = new_plan return room_type, result def extract_json(self, input_string): # Using regex to identify the JSON structure in the string json_match = re.search(r'{.*}', input_string, re.DOTALL) if json_match: extracted_json = json_match.group(0) try: # Convert the extracted JSON string into a Python dictionary json_dict = json.loads(extracted_json) json_dict = self.check_dict(json_dict) return json_dict except json.JSONDecodeError: print(input_string) print("Error while decoding the JSON.") return None else: print("No valid JSON found.") return None def check_dict(self, dict): valid = True attributes = ["description", "location", "size", "quantity", "variance_type", "objects_on_top"] for key, value in dict.items(): if not isinstance(key, str): valid = False; break if not isinstance(value, Dict): valid = False; break for attribute in attributes: if attribute not in value: valid = False; break if not isinstance(value["description"], str): valid = False; break if value["location"] not in ["floor", "wall"]: dict[key]["location"] = "floor" if not isinstance(value["size"], list) or len(value["size"]) != 3 or not all(isinstance(i, int) for i in value["size"]): dict[key]["size"] = None if not isinstance(value["quantity"], int): dict[key]["quantity"] = 1 if not isinstance(value["variance_type"], str) or value["variance_type"] not in ["same", "varied"]: dict[key]["variance_type"] = "same" if not isinstance(value["objects_on_top"], list): dict[key]["objects_on_top"] = [] for i, child in enumerate(value["objects_on_top"]): if not isinstance(child, Dict): valid = False; break for attribute in ["object_name", "quantity", "variance_type"]: if attribute not in child: valid = False; break if not isinstance(child["object_name"], str): valid = False; break if not isinstance(child["quantity"], int): dict[key]["objects_on_top"][i]["quantity"] = 1 if not isinstance(child["variance_type"], str) or child["variance_type"] not in ["same", "varied"]: dict[key]["objects_on_top"][i]["variance_type"] = "same" if not valid: return None else: return dict def 
get_objects_by_room(self, parsed_plan, scene, room_size, floor_capacity, wall_capacity, vertices): # get the floor and wall objects floor_object_list = [] wall_object_list = [] for object_name, object_info in parsed_plan.items(): object_info["object_name"] = object_name if object_info["location"] == "floor": floor_object_list.append(object_info) else: wall_object_list.append(object_info) floor_objects, floor_capacity = self.get_floor_objects(floor_object_list, floor_capacity, room_size, vertices, scene) wall_objects, wall_capacity = self.get_wall_objects(wall_object_list, wall_capacity, room_size, vertices, scene) return floor_objects, floor_capacity, wall_objects, wall_capacity def get_room_size(self, room, wall_height): floor_polygon = room["floorPolygon"] x_values = [point['x'] for point in floor_polygon] z_values = [point['z'] for point in floor_polygon] x_dim = max(x_values) - min(x_values) z_dim = max(z_values) - min(z_values) if x_dim > z_dim: return (x_dim, wall_height, z_dim) else: return (z_dim, wall_height, x_dim) def get_room_area(self, room): room_vertices = room["vertices"] room_polygon = Polygon(room_vertices) return room_polygon.area def get_room_perimeter(self, room): room_vertices = room["vertices"] room_polygon = Polygon(room_vertices) return room_polygon.length def get_floor_objects(self, floor_object_list, floor_capacity, room_size, room_vertices, scene): selected_floor_objects_all = [] for floor_object in floor_object_list: object_type = floor_object["object_name"] object_description = floor_object["description"] object_size = floor_object["size"] quantity = min(floor_object["quantity"], 10) variance_type = floor_object["variance_type"] candidates = self.object_retriever.retrieve([f"a 3D model of {object_type}, {object_description}"], self.similarity_threshold_floor) # check on floor objects candidates = [candidate for candidate in candidates if self.database[candidate[0]]["annotations"]["onFloor"] == True] # only select objects on the floor candidates = [candidate for candidate in candidates if self.database[candidate[0]]["annotations"]["onCeiling"] == False] # only select objects not on the ceiling # ignore doors and windows and frames candidates = [candidate for candidate in candidates if "door" not in self.database[candidate[0]]["annotations"]["category"].lower()] candidates = [candidate for candidate in candidates if "window" not in self.database[candidate[0]]["annotations"]["category"].lower()] candidates = [candidate for candidate in candidates if "frame" not in self.database[candidate[0]]["annotations"]["category"].lower()] # check if the object is too big candidates = self.check_object_size(candidates, room_size) # check if object can be placed on the floor candidates = self.check_floor_placement(candidates[:20], room_vertices, scene) # No candidates found if len(candidates) == 0: print("No candidates found for {} {}".format(object_type, object_description)); continue # remove used assets top_one_candidate = candidates[0] if len(candidates) > 1: candidates = [candidate for candidate in candidates if candidate[0] not in self.used_assets] if len(candidates) == 0: candidates = [top_one_candidate] # consider object size difference if object_size is not None and self.consider_size: candidates = self.object_retriever.compute_size_difference(object_size, candidates) candidates = candidates[:10] # only select top 10 candidates selected_asset_ids = [] if variance_type == "same": selected_candidate = self.random_select(candidates) selected_asset_id = 
selected_candidate[0] selected_asset_ids = [selected_asset_id] * quantity elif variance_type == "varied": for i in range(quantity): selected_candidate = self.random_select(candidates) selected_asset_id = selected_candidate[0] selected_asset_ids.append(selected_asset_id) if len(candidates) > 1: candidates.remove(selected_candidate) for i in range(quantity): selected_asset_id = selected_asset_ids[i] object_name = f"{object_type}-{i}" selected_floor_objects_all.append((object_name, selected_asset_id)) # reselect objects if they exceed floor capacity, consider the diversity of objects selected_floor_objects = [] while True: if len(selected_floor_objects_all) == 0: break current_selected_asset_ids = [] current_number_of_objects = len(selected_floor_objects) for object_name, selected_asset_id in selected_floor_objects_all: if selected_asset_id not in current_selected_asset_ids: selected_asset_size = self.database[selected_asset_id]["assetMetadata"]["boundingBox"] selected_asset_capacity = selected_asset_size["x"] * selected_asset_size["z"] if floor_capacity[1] + selected_asset_capacity > floor_capacity[0] and len(selected_floor_objects) > 0: print(f"{object_type} {object_description} exceeds floor capacity") else: current_selected_asset_ids.append(selected_asset_id) selected_floor_objects.append((object_name, selected_asset_id)) selected_floor_objects_all.remove((object_name, selected_asset_id)) floor_capacity = (floor_capacity[0], floor_capacity[1] + selected_asset_capacity) if len(selected_floor_objects) == current_number_of_objects: print("No more objects can be added"); break # sort objects by object type object_type2objects = {} for object_name, selected_asset_id in selected_floor_objects: object_type = object_name.split("-")[0] if object_type not in object_type2objects: object_type2objects[object_type] = [] object_type2objects[object_type].append((object_name, selected_asset_id)) selected_floor_objects_ordered = [] for object_type in object_type2objects: selected_floor_objects_ordered += sorted(object_type2objects[object_type]) return selected_floor_objects_ordered, floor_capacity def get_wall_objects(self, wall_object_list, wall_capacity, room_size, room_vertices, scene): selected_wall_objects_all = [] for wall_object in wall_object_list: object_type = wall_object["object_name"] object_description = wall_object["description"] object_size = wall_object["size"] quantity = min(wall_object["quantity"], 10) variance_type = wall_object["variance_type"] candidates = self.object_retriever.retrieve([f"a 3D model of {object_type}, {object_description}"], self.similarity_threshold_wall) # check on wall objects candidates = [candidate for candidate in candidates if self.database[candidate[0]]["annotations"]["onWall"] == True] # only select objects on the wall # ignore doors and windows candidates = [candidate for candidate in candidates if "door" not in self.database[candidate[0]]["annotations"]["category"].lower()] candidates = [candidate for candidate in candidates if "window" not in self.database[candidate[0]]["annotations"]["category"].lower()] # check if the object is too big candidates = self.check_object_size(candidates, room_size) # check thin objects candidates = self.check_thin_object(candidates) # check if object can be placed on the wall candidates = self.check_wall_placement(candidates[:20], room_vertices, scene) if len(candidates) == 0: print("No candidates found for {} {}".format(object_type, object_description)); continue # remove used assets top_one_candidate = candidates[0] if 
len(candidates) > 1: candidates = [candidate for candidate in candidates if candidate[0] not in self.used_assets] if len(candidates) == 0: candidates = [top_one_candidate] # consider object size difference if object_size is not None and self.consider_size: candidates = self.object_retriever.compute_size_difference(object_size, candidates) candidates = candidates[:10] # only select top 10 candidates selected_asset_ids = [] if variance_type == "same": selected_candidate = self.random_select(candidates) selected_asset_id = selected_candidate[0] selected_asset_ids = [selected_asset_id] * quantity elif variance_type == "varied": for i in range(quantity): selected_candidate = self.random_select(candidates) selected_asset_id = selected_candidate[0] selected_asset_ids.append(selected_asset_id) if len(candidates) > 1: candidates.remove(selected_candidate) for i in range(quantity): selected_asset_id = selected_asset_ids[i] object_name = f"{object_type}-{i}" selected_wall_objects_all.append((object_name, selected_asset_id)) # reselect objects if they exceed wall capacity, consider the diversity of objects selected_wall_objects = [] while True: if len(selected_wall_objects_all) == 0: break current_selected_asset_ids = [] current_number_of_objects = len(selected_wall_objects) for object_name, selected_asset_id in selected_wall_objects_all: if selected_asset_id not in current_selected_asset_ids: selected_asset_size = self.database[selected_asset_id]["assetMetadata"]["boundingBox"] selected_asset_capacity = selected_asset_size["x"] if wall_capacity[1] + selected_asset_capacity > wall_capacity[0] and len(selected_wall_objects) > 0: print(f"{object_type} {object_description} exceeds wall capacity") else: current_selected_asset_ids.append(selected_asset_id) selected_wall_objects.append((object_name, selected_asset_id)) selected_wall_objects_all.remove((object_name, selected_asset_id)) wall_capacity = (wall_capacity[0], wall_capacity[1] + selected_asset_capacity) if len(selected_wall_objects) == current_number_of_objects: print("No more objects can be added"); break # sort objects by object type object_type2objects = {} for object_name, selected_asset_id in selected_wall_objects: object_type = object_name.split("-")[0] if object_type not in object_type2objects: object_type2objects[object_type] = [] object_type2objects[object_type].append((object_name, selected_asset_id)) selected_wall_objects_ordered = [] for object_type in object_type2objects: selected_wall_objects_ordered += sorted(object_type2objects[object_type]) return selected_wall_objects_ordered, wall_capacity def check_object_size(self, candidates, room_size): valid_candidates = [] for candidate in candidates: dimension = self.database[candidate[0]]["assetMetadata"]["boundingBox"] size = [dimension["x"], dimension["y"], dimension["z"]] if size[2] > size[0]: size = [size[2], size[1], size[0]] # make sure that x > z if size[0] > room_size[0] * self.object_size_tolerance: continue if size[1] > room_size[1] * self.object_size_tolerance: continue if size[2] > room_size[2] * self.object_size_tolerance: continue if size[0] * size[2] > room_size[0] * room_size[2] * 0.5: continue # TODO: consider using the floor area instead of the room area valid_candidates.append(candidate) return valid_candidates def check_thin_object(self, candidates): valid_candidates = [] for candidate in candidates: dimension = self.database[candidate[0]]["assetMetadata"]["boundingBox"] size = [dimension["x"], dimension["y"], dimension["z"]] if size[2] > min(size[0], size[1]) * 
self.thin_threshold: continue valid_candidates.append(candidate) return valid_candidates def random_select(self, candidates): if self.random_selection: selected_candidate = random.choice(candidates) else: scores = [candidate[1] for candidate in candidates] scores_tensor = torch.Tensor(scores) probas = F.softmax(scores_tensor, dim=0) # TODO: consider using normalized scores selected_index = torch.multinomial(probas, 1).item() selected_candidate = candidates[selected_index] return selected_candidate def update_floor_capacity(self, room2floor_capacity, scene): for room in scene["rooms"]: room_vertices = room["vertices"] room_poly = Polygon(room_vertices) for door in scene["doors"]: for door_vertices in door["doorBoxes"]: door_poly = Polygon(door_vertices) door_center = door_poly.centroid door_area = door_poly.area if room_poly.contains(door_center): room2floor_capacity[room["id"]][1] += door_area * 0.6 if scene["open_walls"] != []: for open_wall_vertices in scene["open_walls"]["openWallBoxes"]: open_wall_poly = Polygon(open_wall_vertices) open_wall_center = open_wall_poly.centroid if room_poly.contains(open_wall_center): room2floor_capacity[room["id"]][1] += open_wall_poly.area * 0.6 return room2floor_capacity def update_wall_capacity(self, room2wall_capacity, scene): for room in scene["rooms"]: room_vertices = room["vertices"] room_poly = Polygon(room_vertices) for window in scene["windows"]: for window_vertices in window["windowBoxes"]: window_poly = Polygon(window_vertices) window_center = window_poly.centroid window_x = window_poly.bounds[2] - window_poly.bounds[0] window_y = window_poly.bounds[3] - window_poly.bounds[1] window_width = max(window_x, window_y) if room_poly.contains(window_center): room2wall_capacity[room["id"]][1] += window_width * 0.6 if scene["open_walls"] != []: for open_wall_vertices in scene["open_walls"]["openWallBoxes"]: open_wall_poly = Polygon(open_wall_vertices) open_wall_center = open_wall_poly.centroid open_wall_x = open_wall_poly.bounds[2] - open_wall_poly.bounds[0] open_wall_y = open_wall_poly.bounds[3] - open_wall_poly.bounds[1] open_wall_width = max(open_wall_x, open_wall_y) if room_poly.contains(open_wall_center): room2wall_capacity[room["id"]][1] += open_wall_width * 0.6 return room2wall_capacity def check_floor_placement(self, candidates, room_vertices, scene): room_x = max([vertex[0] for vertex in room_vertices]) - min([vertex[0] for vertex in room_vertices]) room_z = max([vertex[1] for vertex in room_vertices]) - min([vertex[1] for vertex in room_vertices]) grid_size = int(max(room_x // 20, room_z // 20)) solver = DFS_Solver_Floor(grid_size=grid_size) room_poly = Polygon(room_vertices) initial_state = self.get_initial_state_floor(room_vertices, scene, add_window=False) grid_points = solver.create_grids(room_poly) grid_points = solver.remove_points(grid_points, initial_state) valid_candidates = [] for candidate in candidates: object_size = self.database[candidate[0]]["assetMetadata"]["boundingBox"] object_dim = (object_size["x"]*100 + self.size_buffer, object_size["z"]*100 + self.size_buffer) solutions = solver.get_all_solutions(room_poly, grid_points, object_dim) solutions = solver.filter_collision(initial_state, solutions) solutions = solver.place_edge(room_poly, solutions, object_dim) if solutions != []: valid_candidates.append(candidate) else: print(f"Floor Object {candidate[0]} (size: {object_dim}) cannot be placed in room"); continue return valid_candidates def check_wall_placement(self, candidates, room_vertices, scene): room_x = max([vertex[0] 
for vertex in room_vertices]) - min([vertex[0] for vertex in room_vertices]) room_z = max([vertex[1] for vertex in room_vertices]) - min([vertex[1] for vertex in room_vertices]) grid_size = int(max(room_x // 20, room_z // 20))
solver = DFS_Solver_Wall(grid_size=grid_size)
1
2023-12-08 19:19:57+00:00
24k
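random_select in the record above samples one candidate with probability proportional to the softmax of its retrieval score rather than always taking the top hit. A minimal sketch of that sampling step, with made-up candidate ids and scores:

import torch
import torch.nn.functional as F

candidates = [("asset_a", 2.0), ("asset_b", 1.0), ("asset_c", 0.5)]  # (id, score), hypothetical
scores = torch.tensor([score for _, score in candidates])
probas = F.softmax(scores, dim=0)          # turn scores into a sampling distribution
idx = torch.multinomial(probas, 1).item()  # one weighted draw
print(candidates[idx])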
modelscope/richdreamer
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Flo...
import numpy as np import os import torch import torch.nn as nn import torch.nn.functional as F import threestudio import trimesh from dataclasses import dataclass, field from threestudio.models.geometry.base import (BaseExplicitGeometry, BaseGeometry, contract_to_unisphere,) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.misc import broadcast, get_rank from threestudio.utils.ops import scale_tensor from threestudio.utils.typing import * from pysdf import SDF
15,743
# scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance elif isinstance(other, ImplicitVolume): instance = TetrahedraSDFGrid(cfg, **kwargs) if other.cfg.isosurface_method != "mt": other.cfg.isosurface_method = "mt" threestudio.warn( f"Override isosurface_method of the source geometry to 'mt'" ) if other.cfg.isosurface_resolution != instance.cfg.isosurface_resolution: other.cfg.isosurface_resolution = instance.cfg.isosurface_resolution threestudio.warn( f"Override isosurface_resolution of the source geometry to {instance.cfg.isosurface_resolution}" ) mesh = other.isosurface() # instance.isosurface_bbox = mesh.extras["bbox"] instance.isosurface_bbox = mesh.extras["bbox"] * instance.cfg.nerf_scale instance.sdf.data = ( mesh.extras["grid_level"].to(instance.sdf.data).clamp(-1, 1) ) if not instance.cfg.geometry_only and copy_net: 
instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 nerf_scale: float = 1.0 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None shape_init_mesh_up: str = "+z" shape_init_mesh_front: str = "+x" force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False # sdf_bias: Union[float, str] = 0.0 # sdf_bias_params: Optional[Any] = None cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: if self.cfg.shape_init is None and not self.cfg.force_shape_init: return # do not initialize shape if weights are provided if self.cfg.weights is not None and not self.cfg.force_shape_init: return get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]] assert isinstance(self.cfg.shape_init, str) if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert 
isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) 
instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance elif isinstance(other, ImplicitVolume): instance = TetrahedraSDFGrid(cfg, **kwargs) if other.cfg.isosurface_method != "mt": other.cfg.isosurface_method = "mt" threestudio.warn( f"Override isosurface_method of the source geometry to 'mt'" ) if other.cfg.isosurface_resolution != instance.cfg.isosurface_resolution: other.cfg.isosurface_resolution = instance.cfg.isosurface_resolution threestudio.warn( f"Override isosurface_resolution of the source geometry to {instance.cfg.isosurface_resolution}" ) mesh = other.isosurface() # instance.isosurface_bbox = mesh.extras["bbox"] instance.isosurface_bbox = mesh.extras["bbox"] * instance.cfg.nerf_scale instance.sdf.data = ( mesh.extras["grid_level"].to(instance.sdf.data).clamp(-1, 1) ) if not instance.cfg.geometry_only and copy_net: instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
elif isinstance(other, ImplicitSDF):
3
2023-12-06 07:53:11+00:00
24k
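initialize_shape in the record above negates every pysdf query because pysdf reports positive signed distance inside a mesh, while the SDF grid expects the negative-inside convention. A minimal sketch of that sign flip, using a trimesh icosphere as a stand-in mesh (assumes pysdf and trimesh are installed):

import numpy as np
import trimesh
from pysdf import SDF

mesh = trimesh.creation.icosphere(radius=1.0)  # hypothetical unit-sphere mesh
sdf = SDF(mesh.vertices, mesh.faces)
points = np.array([[0.0, 0.0, 0.0], [2.0, 0.0, 0.0]], dtype=np.float32)
print(-sdf(points))  # negated: ~[-1.0, 1.0], i.e. inside negative, outside positive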
rehg-lab/RAVE
annotator/oneformer/detectron2/modeling/meta_arch/retinanet.py
[ { "identifier": "configurable", "path": "annotator/oneformer/detectron2/config/config.py", "snippet": "def configurable(init_func=None, *, from_config=None):\r\n \"\"\"\r\n Decorate a function or a class's __init__ method so that it can be called\r\n with a :class:`CfgNode` object using a :func...
import logging import math import torch from typing import List, Tuple from fvcore.nn import sigmoid_focal_loss_jit from torch import Tensor, nn from torch.nn import functional as F from annotator.oneformer.detectron2.config import configurable from annotator.oneformer.detectron2.layers import CycleBatchNormList, ShapeSpec, batched_nms, cat, get_norm from annotator.oneformer.detectron2.structures import Boxes, ImageList, Instances, pairwise_iou from annotator.oneformer.detectron2.utils.events import get_event_storage from ..anchor_generator import build_anchor_generator from ..backbone import Backbone, build_backbone from ..box_regression import Box2BoxTransform, _dense_box_regression_loss from ..matcher import Matcher from .build import META_ARCH_REGISTRY from .dense_detector import DenseDetector, permute_to_N_HWA_K # noqa
16,849
# Vis parameters self.vis_period = vis_period self.input_format = input_format @classmethod def from_config(cls, cfg): backbone = build_backbone(cfg) backbone_shape = backbone.output_shape() feature_shapes = [backbone_shape[f] for f in cfg.MODEL.RETINANET.IN_FEATURES] head = RetinaNetHead(cfg, feature_shapes) anchor_generator = build_anchor_generator(cfg, feature_shapes) return { "backbone": backbone, "head": head, "anchor_generator": anchor_generator, "box2box_transform": Box2BoxTransform(weights=cfg.MODEL.RETINANET.BBOX_REG_WEIGHTS), "anchor_matcher": Matcher( cfg.MODEL.RETINANET.IOU_THRESHOLDS, cfg.MODEL.RETINANET.IOU_LABELS, allow_low_quality_matches=True, ), "pixel_mean": cfg.MODEL.PIXEL_MEAN, "pixel_std": cfg.MODEL.PIXEL_STD, "num_classes": cfg.MODEL.RETINANET.NUM_CLASSES, "head_in_features": cfg.MODEL.RETINANET.IN_FEATURES, # Loss parameters: "focal_loss_alpha": cfg.MODEL.RETINANET.FOCAL_LOSS_ALPHA, "focal_loss_gamma": cfg.MODEL.RETINANET.FOCAL_LOSS_GAMMA, "smooth_l1_beta": cfg.MODEL.RETINANET.SMOOTH_L1_LOSS_BETA, "box_reg_loss_type": cfg.MODEL.RETINANET.BBOX_REG_LOSS_TYPE, # Inference parameters: "test_score_thresh": cfg.MODEL.RETINANET.SCORE_THRESH_TEST, "test_topk_candidates": cfg.MODEL.RETINANET.TOPK_CANDIDATES_TEST, "test_nms_thresh": cfg.MODEL.RETINANET.NMS_THRESH_TEST, "max_detections_per_image": cfg.TEST.DETECTIONS_PER_IMAGE, # Vis parameters "vis_period": cfg.VIS_PERIOD, "input_format": cfg.INPUT.FORMAT, } def forward_training(self, images, features, predictions, gt_instances): # Transpose the Hi*Wi*A dimension to the middle: pred_logits, pred_anchor_deltas = self._transpose_dense_predictions( predictions, [self.num_classes, 4] ) anchors = self.anchor_generator(features) gt_labels, gt_boxes = self.label_anchors(anchors, gt_instances) return self.losses(anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes) def losses(self, anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes): """ Args: anchors (list[Boxes]): a list of #feature level Boxes gt_labels, gt_boxes: see output of :meth:`RetinaNet.label_anchors`. Their shapes are (N, R) and (N, R, 4), respectively, where R is the total number of anchors across levels, i.e. sum(Hi x Wi x Ai) pred_logits, pred_anchor_deltas: both are list[Tensor]. Each element in the list corresponds to one level and has shape (N, Hi * Wi * Ai, K or 4). Where K is the number of classes used in `pred_logits`. Returns: dict[str, Tensor]: mapping from a named loss to a scalar tensor storing the loss. Used during training only. 
The dict keys are: "loss_cls" and "loss_box_reg" """ num_images = len(gt_labels) gt_labels = torch.stack(gt_labels) # (N, R) valid_mask = gt_labels >= 0 pos_mask = (gt_labels >= 0) & (gt_labels != self.num_classes) num_pos_anchors = pos_mask.sum().item() get_event_storage().put_scalar("num_pos_anchors", num_pos_anchors / num_images) normalizer = self._ema_update("loss_normalizer", max(num_pos_anchors, 1), 100) # classification and regression loss gt_labels_target = F.one_hot(gt_labels[valid_mask], num_classes=self.num_classes + 1)[ :, :-1 ] # no loss for the last (background) class loss_cls = sigmoid_focal_loss_jit( cat(pred_logits, dim=1)[valid_mask], gt_labels_target.to(pred_logits[0].dtype), alpha=self.focal_loss_alpha, gamma=self.focal_loss_gamma, reduction="sum", ) loss_box_reg = _dense_box_regression_loss( anchors, self.box2box_transform, pred_anchor_deltas, gt_boxes, pos_mask, box_reg_loss_type=self.box_reg_loss_type, smooth_l1_beta=self.smooth_l1_beta, ) return { "loss_cls": loss_cls / normalizer, "loss_box_reg": loss_box_reg / normalizer, } @torch.no_grad() def label_anchors(self, anchors, gt_instances): """ Args: anchors (list[Boxes]): A list of #feature level Boxes. The Boxes contains anchors of this image on the specific feature level. gt_instances (list[Instances]): a list of N `Instances`s. The i-th `Instances` contains the ground-truth per-instance annotations for the i-th input image. Returns: list[Tensor]: List of #img tensors. i-th element is a vector of labels whose length is the total number of anchors across all feature maps (sum(Hi * Wi * A)). Label values are in {-1, 0, ..., K}, with -1 means ignore, and K means background. list[Tensor]: i-th element is a Rx4 tensor, where R is the total number of anchors across feature maps. The values are the matched gt boxes for each anchor. Values are undefined for those anchors not labeled as foreground. """
# Copyright (c) Facebook, Inc. and its affiliates. __all__ = ["RetinaNet"] logger = logging.getLogger(__name__) @META_ARCH_REGISTRY.register() class RetinaNet(DenseDetector): """ Implement RetinaNet in :paper:`RetinaNet`. """ @configurable def __init__( self, *, backbone: Backbone, head: nn.Module, head_in_features, anchor_generator, box2box_transform, anchor_matcher, num_classes, focal_loss_alpha=0.25, focal_loss_gamma=2.0, smooth_l1_beta=0.0, box_reg_loss_type="smooth_l1", test_score_thresh=0.05, test_topk_candidates=1000, test_nms_thresh=0.5, max_detections_per_image=100, pixel_mean, pixel_std, vis_period=0, input_format="BGR", ): """ NOTE: this interface is experimental. Args: backbone: a backbone module, must follow detectron2's backbone interface head (nn.Module): a module that predicts logits and regression deltas for each level from a list of per-level features head_in_features (Tuple[str]): Names of the input feature maps to be used in head anchor_generator (nn.Module): a module that creates anchors from a list of features. Usually an instance of :class:`AnchorGenerator` box2box_transform (Box2BoxTransform): defines the transform from anchors boxes to instance boxes anchor_matcher (Matcher): label the anchors by matching them with ground truth. num_classes (int): number of classes. Used to label background proposals. # Loss parameters: focal_loss_alpha (float): focal_loss_alpha focal_loss_gamma (float): focal_loss_gamma smooth_l1_beta (float): smooth_l1_beta box_reg_loss_type (str): Options are "smooth_l1", "giou", "diou", "ciou" # Inference parameters: test_score_thresh (float): Inference cls score threshold, only anchors with score > INFERENCE_TH are considered for inference (to improve speed) test_topk_candidates (int): Select topk candidates before NMS test_nms_thresh (float): Overlap threshold used for non-maximum suppression (suppress boxes with IoU >= this threshold) max_detections_per_image (int): Maximum number of detections to return per image during inference (100 is based on the limit established for the COCO dataset). pixel_mean, pixel_std: see :class:`DenseDetector`. 
""" super().__init__( backbone, head, head_in_features, pixel_mean=pixel_mean, pixel_std=pixel_std ) self.num_classes = num_classes # Anchors self.anchor_generator = anchor_generator self.box2box_transform = box2box_transform self.anchor_matcher = anchor_matcher # Loss parameters: self.focal_loss_alpha = focal_loss_alpha self.focal_loss_gamma = focal_loss_gamma self.smooth_l1_beta = smooth_l1_beta self.box_reg_loss_type = box_reg_loss_type # Inference parameters: self.test_score_thresh = test_score_thresh self.test_topk_candidates = test_topk_candidates self.test_nms_thresh = test_nms_thresh self.max_detections_per_image = max_detections_per_image # Vis parameters self.vis_period = vis_period self.input_format = input_format @classmethod def from_config(cls, cfg): backbone = build_backbone(cfg) backbone_shape = backbone.output_shape() feature_shapes = [backbone_shape[f] for f in cfg.MODEL.RETINANET.IN_FEATURES] head = RetinaNetHead(cfg, feature_shapes) anchor_generator = build_anchor_generator(cfg, feature_shapes) return { "backbone": backbone, "head": head, "anchor_generator": anchor_generator, "box2box_transform": Box2BoxTransform(weights=cfg.MODEL.RETINANET.BBOX_REG_WEIGHTS), "anchor_matcher": Matcher( cfg.MODEL.RETINANET.IOU_THRESHOLDS, cfg.MODEL.RETINANET.IOU_LABELS, allow_low_quality_matches=True, ), "pixel_mean": cfg.MODEL.PIXEL_MEAN, "pixel_std": cfg.MODEL.PIXEL_STD, "num_classes": cfg.MODEL.RETINANET.NUM_CLASSES, "head_in_features": cfg.MODEL.RETINANET.IN_FEATURES, # Loss parameters: "focal_loss_alpha": cfg.MODEL.RETINANET.FOCAL_LOSS_ALPHA, "focal_loss_gamma": cfg.MODEL.RETINANET.FOCAL_LOSS_GAMMA, "smooth_l1_beta": cfg.MODEL.RETINANET.SMOOTH_L1_LOSS_BETA, "box_reg_loss_type": cfg.MODEL.RETINANET.BBOX_REG_LOSS_TYPE, # Inference parameters: "test_score_thresh": cfg.MODEL.RETINANET.SCORE_THRESH_TEST, "test_topk_candidates": cfg.MODEL.RETINANET.TOPK_CANDIDATES_TEST, "test_nms_thresh": cfg.MODEL.RETINANET.NMS_THRESH_TEST, "max_detections_per_image": cfg.TEST.DETECTIONS_PER_IMAGE, # Vis parameters "vis_period": cfg.VIS_PERIOD, "input_format": cfg.INPUT.FORMAT, } def forward_training(self, images, features, predictions, gt_instances): # Transpose the Hi*Wi*A dimension to the middle: pred_logits, pred_anchor_deltas = self._transpose_dense_predictions( predictions, [self.num_classes, 4] ) anchors = self.anchor_generator(features) gt_labels, gt_boxes = self.label_anchors(anchors, gt_instances) return self.losses(anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes) def losses(self, anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes): """ Args: anchors (list[Boxes]): a list of #feature level Boxes gt_labels, gt_boxes: see output of :meth:`RetinaNet.label_anchors`. Their shapes are (N, R) and (N, R, 4), respectively, where R is the total number of anchors across levels, i.e. sum(Hi x Wi x Ai) pred_logits, pred_anchor_deltas: both are list[Tensor]. Each element in the list corresponds to one level and has shape (N, Hi * Wi * Ai, K or 4). Where K is the number of classes used in `pred_logits`. Returns: dict[str, Tensor]: mapping from a named loss to a scalar tensor storing the loss. Used during training only. 
The dict keys are: "loss_cls" and "loss_box_reg" """ num_images = len(gt_labels) gt_labels = torch.stack(gt_labels) # (N, R) valid_mask = gt_labels >= 0 pos_mask = (gt_labels >= 0) & (gt_labels != self.num_classes) num_pos_anchors = pos_mask.sum().item() get_event_storage().put_scalar("num_pos_anchors", num_pos_anchors / num_images) normalizer = self._ema_update("loss_normalizer", max(num_pos_anchors, 1), 100) # classification and regression loss gt_labels_target = F.one_hot(gt_labels[valid_mask], num_classes=self.num_classes + 1)[ :, :-1 ] # no loss for the last (background) class loss_cls = sigmoid_focal_loss_jit( cat(pred_logits, dim=1)[valid_mask], gt_labels_target.to(pred_logits[0].dtype), alpha=self.focal_loss_alpha, gamma=self.focal_loss_gamma, reduction="sum", ) loss_box_reg = _dense_box_regression_loss( anchors, self.box2box_transform, pred_anchor_deltas, gt_boxes, pos_mask, box_reg_loss_type=self.box_reg_loss_type, smooth_l1_beta=self.smooth_l1_beta, ) return { "loss_cls": loss_cls / normalizer, "loss_box_reg": loss_box_reg / normalizer, } @torch.no_grad() def label_anchors(self, anchors, gt_instances): """ Args: anchors (list[Boxes]): A list of #feature level Boxes. The Boxes contains anchors of this image on the specific feature level. gt_instances (list[Instances]): a list of N `Instances`s. The i-th `Instances` contains the ground-truth per-instance annotations for the i-th input image. Returns: list[Tensor]: List of #img tensors. i-th element is a vector of labels whose length is the total number of anchors across all feature maps (sum(Hi * Wi * A)). Label values are in {-1, 0, ..., K}, with -1 means ignore, and K means background. list[Tensor]: i-th element is a Rx4 tensor, where R is the total number of anchors across feature maps. The values are the matched gt boxes for each anchor. Values are undefined for those anchors not labeled as foreground. """
anchors = Boxes.cat(anchors) # Rx4
6
2023-12-05 02:51:53+00:00
24k
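The losses method in the record above pairs sigmoid focal loss with an EMA-smoothed count of positive anchors as the normalizer. Below is a minimal, self-contained sketch of that focal loss for checking the alpha/gamma mechanics; the tensor shapes and the alpha=0.25 / gamma=2.0 defaults are common illustrative assumptions, not values read from the record.

import torch
import torch.nn.functional as F

def sigmoid_focal_loss(logits, targets, alpha=0.25, gamma=2.0):
    # logits, targets: (N, K); targets are multi-hot in {0, 1}
    p = torch.sigmoid(logits)
    ce = F.binary_cross_entropy_with_logits(logits, targets, reduction="none")
    p_t = p * targets + (1 - p) * (1 - targets)   # probability of the true label
    loss = ce * (1 - p_t) ** gamma                # down-weight easy examples
    if alpha >= 0:
        loss = (alpha * targets + (1 - alpha) * (1 - targets)) * loss
    return loss.sum()

# 8 anchors, 80 classes (illustrative); one positive label per anchor
targets = F.one_hot(torch.randint(0, 80, (8,)), num_classes=80).float()
print(sigmoid_focal_loss(torch.randn(8, 80), targets))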
DiffusionLight/DiffusionLight
relighting/inpainter.py
[ { "identifier": "CustomStableDiffusionControlNetInpaintPipeline", "path": "relighting/pipeline.py", "snippet": "class CustomStableDiffusionControlNetInpaintPipeline(StableDiffusionControlNetInpaintPipeline):\n @torch.no_grad()\n def __call__(\n self,\n prompt: Union[str, List[str]] =...
import torch import numpy as np import os import pickle from diffusers import ControlNetModel, AutoencoderKL from PIL import Image from tqdm.auto import tqdm from transformers import pipeline as transformers_pipeline from relighting.pipeline import CustomStableDiffusionControlNetInpaintPipeline from relighting.pipeline_inpaintonly import CustomStableDiffusionInpaintPipeline, CustomStableDiffusionXLInpaintPipeline from relighting.argument import SAMPLERS, VAE_MODELS, DEPTH_ESTIMATOR, get_control_signal_type from relighting.image_processor import ( estimate_scene_depth, estimate_scene_normal, merge_normal_map, fill_depth_circular ) from relighting.ball_processor import get_ideal_normal_ball, crop_ball from relighting.pipeline_xl import CustomStableDiffusionXLControlNetInpaintPipeline
18,556
control_image = fill_depth_circular(control_image, x, y, r) return control_image def process_sdxl_depth(self, input_image, normal_ball=None, mask_ball=None, x=None, y=None, r=None): if getattr(self, 'depth_estimator', None) is None: self.depth_estimator = transformers_pipeline("depth-estimation", model=DEPTH_ESTIMATOR, device=self.device.index) control_image = estimate_scene_depth(input_image, depth_estimator=self.depth_estimator) xs = [x] if not isinstance(x, list) else x ys = [y] if not isinstance(y, list) else y rs = [r] if not isinstance(r, list) else r for x, y, r in zip(xs, ys, rs): #print(f"depth at {x}, {y}, {r}") control_image = fill_depth_circular(control_image, x, y, r) return control_image def process_sd_normal(self, input_image, normal_ball, mask_ball, x, y, r=None, normal_ball_path=None): if getattr(self, 'depth_estimator', None) is None: self.depth_estimator = transformers_pipeline("depth-estimation", model=DEPTH_ESTIMATOR, device=self.device.index) normal_scene = estimate_scene_normal(input_image, depth_estimator=self.depth_estimator) normal_image = merge_normal_map(normal_scene, normal_ball, mask_ball, x, y) normal_image = (normal_image * 127.5 + 127.5).clip(0, 255).astype(np.uint8) control_image = Image.fromarray(normal_image) return control_image def __call__(self, *args, **kwargs): process_fn = getattr(self, f"process_{self.sd_arch}_{self.control_signal_type}", None) if process_fn is None: raise ValueError(f"Unsupported control signal: process_{self.sd_arch}_{self.control_signal_type}") else: return process_fn(*args, **kwargs) class BallInpainter(): def __init__(self, pipeline, sd_arch, control_generator, disable_water_mask=True): self.pipeline = pipeline self.sd_arch = sd_arch self.control_generator = control_generator self.median = {} if disable_water_mask: self._disable_water_mask() def _disable_water_mask(self): if hasattr(self.pipeline, "watermark"): self.pipeline.watermark = NoWaterMark() print("Disabled watermarking") @classmethod def from_sd(cls, model, controlnet=None, device=0, sampler="unipc", torch_dtype=torch.float16, disable_water_mask=True, offload=False ): if controlnet is not None: control_signal_type = get_control_signal_type(controlnet) controlnet = ControlNetModel.from_pretrained(controlnet, torch_dtype=torch.float16) pipe = CustomStableDiffusionControlNetInpaintPipeline.from_pretrained( model, controlnet=controlnet, torch_dtype=torch_dtype, ).to(device) control_generator = ControlSignalGenerator("sd", control_signal_type, device=device) else: pipe = CustomStableDiffusionInpaintPipeline.from_pretrained( model, torch_dtype=torch_dtype, ).to(device) control_generator = None try: if torch_dtype == torch.float16 and device != torch.device("cpu"): pipe.enable_xformers_memory_efficient_attention() except Exception: pass pipe.set_progress_bar_config(disable=True) pipe.scheduler = SAMPLERS[sampler].from_config(pipe.scheduler.config) return BallInpainter(pipe, "sd", control_generator, disable_water_mask) @classmethod def from_sdxl(cls, model, controlnet=None, device=0, sampler="unipc", torch_dtype=torch.float16, disable_water_mask=True, use_fixed_vae=True, offload=False ): vae = VAE_MODELS["sdxl"] vae = AutoencoderKL.from_pretrained(vae, torch_dtype=torch_dtype).to(device) if use_fixed_vae else None extra_kwargs = {"vae": vae} if vae is not None else {} if controlnet is not None: control_signal_type = get_control_signal_type(controlnet) controlnet = ControlNetModel.from_pretrained( controlnet, variant="fp16" if torch_dtype == torch.float16 else None, use_safetensors=True, torch_dtype=torch_dtype, ).to(device) pipe =
CustomStableDiffusionXLControlNetInpaintPipeline.from_pretrained( model, controlnet=controlnet, variant="fp16" if torch_dtype == torch.float16 else None, use_safetensors=True, torch_dtype=torch_dtype, **extra_kwargs, ).to(device) control_generator = ControlSignalGenerator("sdxl", control_signal_type, device=device) else:
class NoWaterMark: def apply_watermark(self, *args, **kwargs): return args[0] class ControlSignalGenerator(): def __init__(self, sd_arch, control_signal_type, device): self.sd_arch = sd_arch self.control_signal_type = control_signal_type self.device = device def process_sd_depth(self, input_image, normal_ball=None, mask_ball=None, x=None, y=None, r=None): if getattr(self, 'depth_estimator', None) is None: self.depth_estimator = transformers_pipeline("depth-estimation", device=self.device.index) control_image = self.depth_estimator(input_image)['depth'] control_image = np.array(control_image) control_image = control_image[:, :, None] control_image = np.concatenate([control_image, control_image, control_image], axis=2) control_image = Image.fromarray(control_image) control_image = fill_depth_circular(control_image, x, y, r) return control_image def process_sdxl_depth(self, input_image, normal_ball=None, mask_ball=None, x=None, y=None, r=None): if getattr(self, 'depth_estimator', None) is None: self.depth_estimator = transformers_pipeline("depth-estimation", model=DEPTH_ESTIMATOR, device=self.device.index) control_image = estimate_scene_depth(input_image, depth_estimator=self.depth_estimator) xs = [x] if not isinstance(x, list) else x ys = [y] if not isinstance(y, list) else y rs = [r] if not isinstance(r, list) else r for x, y, r in zip(xs, ys, rs): #print(f"depth at {x}, {y}, {r}") control_image = fill_depth_circular(control_image, x, y, r) return control_image def process_sd_normal(self, input_image, normal_ball, mask_ball, x, y, r=None, normal_ball_path=None): if getattr(self, 'depth_estimator', None) is None: self.depth_estimator = transformers_pipeline("depth-estimation", model=DEPTH_ESTIMATOR, device=self.device.index) normal_scene = estimate_scene_normal(input_image, depth_estimator=self.depth_estimator) normal_image = merge_normal_map(normal_scene, normal_ball, mask_ball, x, y) normal_image = (normal_image * 127.5 + 127.5).clip(0, 255).astype(np.uint8) control_image = Image.fromarray(normal_image) return control_image def __call__(self, *args, **kwargs): process_fn = getattr(self, f"process_{self.sd_arch}_{self.control_signal_type}", None) if process_fn is None: raise ValueError(f"Unsupported control signal: process_{self.sd_arch}_{self.control_signal_type}") else: return process_fn(*args, **kwargs) class BallInpainter(): def __init__(self, pipeline, sd_arch, control_generator, disable_water_mask=True): self.pipeline = pipeline self.sd_arch = sd_arch self.control_generator = control_generator self.median = {} if disable_water_mask: self._disable_water_mask() def _disable_water_mask(self): if hasattr(self.pipeline, "watermark"): self.pipeline.watermark = NoWaterMark() print("Disabled watermarking") @classmethod def from_sd(cls, model, controlnet=None, device=0, sampler="unipc", torch_dtype=torch.float16, disable_water_mask=True, offload=False ): if controlnet is not None: control_signal_type = get_control_signal_type(controlnet) controlnet = ControlNetModel.from_pretrained(controlnet, torch_dtype=torch.float16) pipe = CustomStableDiffusionControlNetInpaintPipeline.from_pretrained( model, controlnet=controlnet, torch_dtype=torch_dtype, ).to(device) control_generator = ControlSignalGenerator("sd", control_signal_type, device=device) else: pipe = CustomStableDiffusionInpaintPipeline.from_pretrained( model, torch_dtype=torch_dtype, ).to(device) control_generator = None try: if torch_dtype == torch.float16 and device != torch.device("cpu"): pipe.enable_xformers_memory_efficient_attention() except Exception: pass pipe.set_progress_bar_config(disable=True) pipe.scheduler =
SAMPLERS[sampler].from_config(pipe.scheduler.config) return BallInpainter(pipe, "sd", control_generator, disable_water_mask) @classmethod def from_sdxl(cls, model, controlnet=None, device=0, sampler="unipc", torch_dtype=torch.float16, disable_water_mask=True, use_fixed_vae=True, offload=False ): vae = VAE_MODELS["sdxl"] vae = AutoencoderKL.from_pretrained(vae, torch_dtype=torch_dtype).to(device) if use_fixed_vae else None extra_kwargs = {"vae": vae} if vae is not None else {} if controlnet is not None: control_signal_type = get_control_signal_type(controlnet) controlnet = ControlNetModel.from_pretrained( controlnet, variant="fp16" if torch_dtype == torch.float16 else None, use_safetensors=True, torch_dtype=torch_dtype, ).to(device) pipe = CustomStableDiffusionXLControlNetInpaintPipeline.from_pretrained( model, controlnet=controlnet, variant="fp16" if torch_dtype == torch.float16 else None, use_safetensors=True, torch_dtype=torch_dtype, **extra_kwargs, ).to(device) control_generator = ControlSignalGenerator("sdxl", control_signal_type, device=device) else:
pipe = CustomStableDiffusionXLInpaintPipeline.from_pretrained(
2
2023-12-07 14:03:31+00:00
24k
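The process_sd_depth method in the record above replicates a single-channel depth map into three channels so the ControlNet receives an RGB-shaped conditioning image. A minimal sketch of just that conversion, with a random array standing in for the depth-estimation pipeline output:

import numpy as np
from PIL import Image

# stand-in for the "depth-estimation" pipeline output (H x W, uint8)
depth = (np.random.rand(512, 512) * 255).astype(np.uint8)
control = np.stack([depth, depth, depth], axis=2)  # H x W x 3, as ControlNet expects
control_image = Image.fromarray(control)
print(control_image.mode, control_image.size)      # RGB (512, 512)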
modelscope/normal-depth-diffusion
ldm/models/diffusion/mv_ddpm.py
[ { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\...
import pdb import numpy as np import pytorch_lightning as pl import torch import torch.nn as nn import torch.nn.functional as F from contextlib import contextmanager from functools import partial from einops import rearrange, repeat from ldm.models.autoencoder import (AutoencoderKL, IdentityFirstStage, VQModelInterface) from ldm.models.diffusion.ddim import DDIMSampler from ldm.models.diffusion.dpm_solver import DPMSolverSampler from ldm.models.diffusion.plms import PLMSSampler from ldm.modules.attention import CrossAttention from ldm.modules.diffusionmodules.util import (extract_into_tensor, make_beta_schedule, noise_like) from ldm.modules.distributions.distributions import ( DiagonalGaussianDistribution, normal_kl) from ldm.modules.ema import LitEma from ldm.util import (count_params, default, exists, filter_nan_loss, instantiate_from_config, isimage, ismap, log_txt_as_img, mean_flat) from torch.optim.lr_scheduler import LambdaLR from torchvision.utils import make_grid from tqdm import tqdm from pytorch_lightning.utilities.distributed import rank_zero_only from pytorch_lightning.utilities.rank_zero import rank_zero_only
15,149
else: return int(self.start_steps + self.start_steps * upper_bound(self.anneal_global_step, global_step)) class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule='linear', loss_type='l2', ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor='val/loss', use_ema=True, first_stage_key='image', image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization='eps', # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., anneal_t=False, # we find at the beginning, smaller t, larger denoise mse loss. anneal_global_step=[], anneal_ratio=0.9, prior_model=None, prior_normal=None, input_keys=['rgb'], ): super().__init__() assert parameterization in [ 'eps', 'x0' ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f'{self.__class__.__name__}: Running in {self.parameterization}-prediction mode' ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f'Keeping EMAs of {len(list(self.model_ema.buffers()))}.') self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight self.input_keys = input_keys if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt( ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule( given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full( fill_value=logvar_init, size=(self.num_timesteps, )) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) ### anneal t function if not anneal_t: self.anneal_func = anneal_identity() else: self.anneal_func = anneal_warmup(anneal_ratio, anneal_global_step, self.num_timesteps) if prior_model is not None: self.prior_model = instantiate_from_config(prior_model) else: self.prior_model = None if prior_normal is not None: self.prior_normal = instantiate_from_config(prior_normal) else: self.prior_normal = None def register_schedule(self, given_betas=None, beta_schedule='linear', timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ try: except: __conditioning_keys__ = { 'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y' } def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class anneal_identity(): def __call__(self, x, global_step): return x def upper_bound(arr, key): left = 0 right = len(arr) while left < right: mid = (left + right) >> 1 if arr[mid] < key: left = mid + 1 else: right = mid return left class anneal_warmup(): def __init__(self, anneal_ratio, anneal_global_step, num_steps): self.anneal_ratio = anneal_ratio self.anneal_global_step = anneal_global_step self.steps = num_steps // (len(anneal_global_step) + 1) self.start_steps = self.steps def __call__(self, x, global_step): if (torch.rand(1) > self.anneal_ratio).item(): return x else: return int(self.start_steps + self.start_steps * upper_bound(self.anneal_global_step, global_step)) class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule='linear', loss_type='l2', ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor='val/loss', use_ema=True, first_stage_key='image', image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization='eps', # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., anneal_t=False, # we find at the begining, smaller t, larger denoise mse loss. anneal_global_step=[], anneal_ratio=0.9, prior_model=None, prior_normal=None, input_keys=['rgb'], ): super().__init__() assert parameterization in [ 'eps', 'x0' ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f'{self.__class__.__name__}: Running in {self.parameterization}-prediction mode' ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f'Keeping EMAs of {len(list(self.model_ema.buffers()))}.') self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight self.input_keys = input_keys if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt( ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule( given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full( fill_value=logvar_init, size=(self.num_timesteps, )) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) ### anneal t function if not anneal_t: self.anneal_func = anneal_identity() else: self.anneal_func = anneal_warmup(anneal_ratio, anneal_global_step, self.num_timesteps) if prior_model is not None: self.prior_model = instantiate_from_config(prior_model) else: self.prior_model = None if prior_normal is not None: self.prior_normal = instantiate_from_config(prior_normal) else: self.prior_normal = None def register_schedule(self, given_betas=None, beta_schedule='linear', timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
if exists(given_betas):
15
2023-12-06 07:29:34+00:00
24k
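register_schedule in the record above buffers the beta schedule and its derived cumulative products. A minimal sketch of the 'linear' case, assuming the sqrt-space linspace used by latent-diffusion-style make_beta_schedule; the defaults mirror the signature in the record:

import torch

def linear_betas(timesteps=1000, linear_start=1e-4, linear_end=2e-2):
    # "linear" here is linear in sqrt(beta), then squared back
    return torch.linspace(linear_start ** 0.5, linear_end ** 0.5, timesteps) ** 2

betas = linear_betas()
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
print(betas[0].item(), alphas_cumprod[-1].item())  # near-zero signal remains at t=T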
RobertCsordas/moe_attention
tasks/simple/language_model/transformer_lm_mixin.py
[ { "identifier": "TransformerLanguageModel", "path": "models/transformer_language_model.py", "snippet": "class TransformerLanguageModel(LoggingLayer, torch.nn.Module):\n def __init__(self, voc_size: int, embedding_size: Optional[int], state_size: int, dropout: float,\n tied_embedding: ...
import framework import torch import torch.nn import torch.nn.functional as F import torch.utils.data import math from typing import List, Tuple, Dict, Any from models import TransformerLanguageModel from ... import task, args from layers.transformer import RelativeTransformerEncoderLayer, PrelnRelativeTransformerEncoderLayer from layers.transformer.relative_moe_transformer import RelativeMoeTransformerEncoderLayer from layers.transformer.fast_rope_transformer import FastRopeTransformerEncoderLayer from layers.transformer.moe_attention_relative_transformer import MoeAttentionRelativeTransformerEncoderLayer from layers.moe_layer import MoE from interfaces import Result from layers import LayerVisualizer from layers.transformer.full_moe_relative_attention import FullMoeRelativeAttentionCore
20,164
parser.add_argument("-moe.att.n_experts", default=4) parser.add_argument("-moe.att.variant", default="moa", choice=["moa", "simple", "qside", "full", "full_rope", "seq", "target"]) parser.add_argument("-moe.att.enable", default=False) parser.add_argument("-moe.att.q_expert", default=True) parser.add_argument("-moe.att.k_expert", default=True) parser.add_argument("-moe.att.v_expert", default=True) parser.add_argument("-moe.att.o_expert", default=True) parser.add_argument("-moe.att.k", default=2) parser.add_argument("-moe.att.norm_qk", default=False) parser.add_argument("-moe.att.v_size", default="none", parser=parser.int_or_none_parser) parser.add_argument("-moe.att.same_sel", default=False) parser.add_argument("-moe.att.expert_dropout", default="none", parser=parser.float_or_none_parser) parser.add_argument("-moe.att.selection_mode", default="sigmoid", choice=["sigmoid", "softmax"]) parser.add_argument("-moe.att.perplexity_reg", default="none", parser=parser.float_or_none_parser) parser.add_argument("-moe.att.qside_n_experts", default="none", parser=parser.int_or_none_parser) parser.add_argument("-moe.att.k", default=2) parser.add_argument("-moe.att.norm_ret", default=False) parser.add_argument("-moe.att.shared_experts", default=False) parser.add_argument("-moe.att.drop_expert", default="none", parser=parser.float_or_none_parser) parser.add_argument("-moe.att.kq_n_experts", default="none", parser=parser.int_or_none_parser) parser.add_argument("-moe.att.separate_kq_sel", default=False) parser.add_argument("-moe.att.norm_init", default=False) parser.add_argument("-rope.rotate_fraction", default=0.5) parser.add_argument("-rope.base", default=10000.0) parser.add_argument("-moa.mode", default="my", choice=["my", "moa"]) parser.add_argument("-moa.cvloss", default=0.0) parser.add_argument("-moa.switchloss", default=0.0) parser.add_argument("-moa.zloss", default=0.0) parser.add_argument("-debug_plot_interval", default="none", parser=parser.int_or_none_parser) parser.add_argument("-transformer.plot_head_details", default=False) parser.add_argument("-plot.n_steps", default=-128) @task() class TransformerLMMixin: helper: framework.helpers.TrainingHelper def is_preln(self) -> bool: return "preln" in self.helper.args.transformer.variant def topk_activation(self, x: torch.Tensor) -> torch.Tensor: nx = -x return torch.masked_fill(x, nx <= nx.kthvalue(self.helper.args.transformer.topk_value, keepdim=True)[0], 0) def get_layers(self) -> List[torch.nn.Module]: # pyright: reportOptionalMemberAccess=false if self.helper.args.transformer.activation == "relu": activation = F.relu elif self.helper.args.transformer.activation == "topk": activation = self.topk_activation elif self.helper.args.transformer.activation == "identity": activation = lambda x: x elif self.helper.args.transformer.activation == "sigmoid": activation = torch.sigmoid elif self.helper.args.transformer.activation == "gelu": activation = F.gelu elif self.helper.args.transformer.activation == "softmax": activation = lambda x: F.softmax(x, dim=-1) else: raise ValueError(f"Invalid activation: {self.helper.args.transformer.activation}") base_args = dict( d_model=self.helper.args.state_size, nhead=self.helper.args.transformer.n_heads, dropout=self.helper.args.dropout, activation=activation ) if self.helper.args.transformer.variant not in {"preln_moe", "moe"}: base_args["dim_feedforward"]=int(self.helper.args.state_size * self.helper.args.transformer.ff_multiplier) extra_args = {} if not self.helper.args.transformer.variant.endswith("_gelu") else { 
"activation": F.gelu, "drop_expand": False } if self.helper.args.transformer.variant in {"preln_relative"}: mklayer = lambda: PrelnRelativeTransformerEncoderLayer( **base_args, **extra_args, test_pos_clamp=self.helper.args.lm.trafo.test_pos_clamp, n_layers=self.helper.args.transformer.encoder_n_layers, head_projection_size=self.helper.args.transformer.head_projection_size,) elif self.helper.args.transformer.variant in {"preln_moeatt"}: mklayer = lambda: MoeAttentionRelativeTransformerEncoderLayer( **base_args, **extra_args, moe_att_n_experts=self.helper.args.moe.att.n_experts, n_layers=self.helper.args.transformer.encoder_n_layers, head_projection_size=self.helper.args.transformer.head_projection_size, att_perplexity_reg=self.helper.args.moe.perplexity_reg if self.helper.args.moe.att.perplexity_reg is None else self.helper.args.moe.att.perplexity_reg, expert_dropout=self.helper.args.moe.drop_expert if self.helper.args.moe.att.drop_expert is None else self.helper.args.moe.att.drop_expert, att_selection_mode=self.helper.args.moe.att.selection_mode, preln=self.is_preln(), attention_variant=self.helper.args.moe.att.variant, q_expert=self.helper.args.moe.att.q_expert, k_expert=self.helper.args.moe.att.k_expert, v_expert=self.helper.args.moe.att.v_expert, o_expert=self.helper.args.moe.att.o_expert, norm_qk_score=self.helper.args.moe.att.norm_qk, v_projection_size=self.helper.args.moe.att.v_size, same_sel=self.helper.args.moe.att.same_sel, moe_k=self.helper.args.moe.att.k, qside_n_experts=self.helper.args.moe.att.qside_n_experts, shared_experts=self.helper.args.moe.att.shared_experts, kq_n_experts=self.helper.args.moe.att.kq_n_experts, separate_kq_sel=self.helper.args.moe.att.separate_kq_sel, moa_mode=self.helper.args.moa.mode, cvloss=self.helper.args.moa.cvloss, switchloss=self.helper.args.moa.switchloss, zloss=self.helper.args.moa.zloss, rotate_fraction=self.helper.args.rope.rotate_fraction, rope_base=self.helper.args.rope.base, moeatt_norm_init=self.helper.args.moe.att.norm_init) elif self.helper.args.transformer.variant in {"preln_rope", "rope"}: mklayer = lambda: FastRopeTransformerEncoderLayer( **base_args, **extra_args, n_layers=self.helper.args.transformer.encoder_n_layers, head_projection_size=self.helper.args.transformer.head_projection_size, preln=self.is_preln(), rotate_fraction = self.helper.args.rope.rotate_fraction, rope_base=self.helper.args.rope.base) elif self.helper.args.transformer.variant in {"preln_moe", "moe"}: # def __init__(self, d_model, nhead, n_bins: int, bin_size: int, n_layers: int, dim_feedforward=2048,
@args def a(parser: framework.helpers.ArgumentParser): parser.add_argument("-lm.trafo.context_blocks", default=1) parser.add_argument("-lm.trafo.test_context_blocks", default="none", parser=parser.int_or_none_parser) parser.add_argument("-lm.trafo.test_pos_clamp", default="none", parser=parser.int_or_none_parser) parser.add_argument("-lm.trafo.same_length_eval", default=False) parser.add_argument("-lm.trafo.same_length", default=False) parser.add_argument("-lm.trafo.last_layer_context", default=False) parser.add_argument("-lm.trafo.xl_init", default=False) parser.add_argument("-lm.trafo.embedding_mode_init", default="default", choice=["default", "scale_to_sqrt_dmodel", "init_to_sqrt_dmodel", "one_and_scale_to_sqrt_dmodel", "like_preln"]) parser.add_argument("-pkm.n_heads", default=1) parser.add_argument("-moe.n_experts", default=128) parser.add_argument("-moe.expert_size", default=128) parser.add_argument("-moe.selection_mode", default="sigmoid", choice=["gate", "sigmoid", "mul"]) parser.add_argument("-moe.perplexity_reg", default=0.0) parser.add_argument("-moe.perplexity_reg_mode", default="step", choice=["step", "global", "time", "global_time"]) parser.add_argument("-moe.reg_type", default="entropy", choice=["perplexity", "variance", "entropy", "l2", "switch", "normal"]) parser.add_argument("-moe.norm_keys", default=False) parser.add_argument("-moe.n_random", default=0) parser.add_argument("-moe.topk_mode", default="full", choice=["full", "l1_approx", "approx"]) parser.add_argument("-moe.activation_after_topk", default=False) parser.add_argument("-moe.drop_parallel", default=True) parser.add_argument("-moe.norm_key_init", default=False) parser.add_argument("-moe.norm_value_init", default=False) parser.add_argument("-moe.identical_init", default=False) parser.add_argument("-moe.sel_lr_multipler", default=1.0) parser.add_argument("-moe.expert_lr_multipler", default=1.0) parser.add_argument("-moe.sel_norm", default="none", choice=["none", "cos", "input", "weights"]) parser.add_argument("-moe.dropout_factor", default=1.0) parser.add_argument("-moe.drop_expert", default=0.0) parser.add_argument("-moe.sync_distributed", default=True) parser.add_argument("-moe.modulation_amplitude", default=0.5) parser.add_argument("-moe.init_scale", default=1.0) parser.add_argument("-moe.norm_expert_sel_init", default=False) parser.add_argument("-kvmem.dropout", default="none", choice=["none", "early", "late", "weight", "score"]) parser.add_argument("-kvmem.norm_values", default=False) parser.add_argument("-transformer.topk_value", default=32) parser.add_argument("-transformer.activation", default="relu", choice=["relu", "topk", "gelu", "identity", "sigmoid", "softmax"]) parser.add_argument("-transformer.p_drop_layer", default=0.0) parser.add_argument("-transformer.head_projection_size", default="none", parser=parser.int_or_none_parser) parser.add_argument("-transformer.ln_affine", default=True) parser.add_argument("-transformer.ln_after_attention", default=True) parser.add_argument("-moe.att.n_experts", default=4) parser.add_argument("-moe.att.variant", default="moa", choice=["moa", "simple", "qside", "full", "full_rope", "seq", "target"]) parser.add_argument("-moe.att.enable", default=False) parser.add_argument("-moe.att.q_expert", default=True) parser.add_argument("-moe.att.k_expert", default=True) parser.add_argument("-moe.att.v_expert", default=True) parser.add_argument("-moe.att.o_expert", default=True) parser.add_argument("-moe.att.k", default=2) parser.add_argument("-moe.att.norm_qk", default=False) 
parser.add_argument("-moe.att.v_size", default="none", parser=parser.int_or_none_parser) parser.add_argument("-moe.att.same_sel", default=False) parser.add_argument("-moe.att.expert_dropout", default="none", parser=parser.float_or_none_parser) parser.add_argument("-moe.att.selection_mode", default="sigmoid", choice=["sigmoid", "softmax"]) parser.add_argument("-moe.att.perplexity_reg", default="none", parser=parser.float_or_none_parser) parser.add_argument("-moe.att.qside_n_experts", default="none", parser=parser.int_or_none_parser) parser.add_argument("-moe.att.k", default=2) parser.add_argument("-moe.att.norm_ret", default=False) parser.add_argument("-moe.att.shared_experts", default=False) parser.add_argument("-moe.att.drop_expert", default="none", parser=parser.float_or_none_parser) parser.add_argument("-moe.att.kq_n_experts", default="none", parser=parser.int_or_none_parser) parser.add_argument("-moe.att.separate_kq_sel", default=False) parser.add_argument("-moe.att.norm_init", default=False) parser.add_argument("-rope.rotate_fraction", default=0.5) parser.add_argument("-rope.base", default=10000.0) parser.add_argument("-moa.mode", default="my", choice=["my", "moa"]) parser.add_argument("-moa.cvloss", default=0.0) parser.add_argument("-moa.switchloss", default=0.0) parser.add_argument("-moa.zloss", default=0.0) parser.add_argument("-debug_plot_interval", default="none", parser=parser.int_or_none_parser) parser.add_argument("-transformer.plot_head_details", default=False) parser.add_argument("-plot.n_steps", default=-128) @task() class TransformerLMMixin: helper: framework.helpers.TrainingHelper def is_preln(self) -> bool: return "preln" in self.helper.args.transformer.variant def topk_activation(self, x: torch.Tensor) -> torch.Tensor: nx = -x return torch.masked_fill(x, nx <= nx.kthvalue(self.helper.args.transformer.topk_value, keepdim=True)[0], 0) def get_layers(self) -> List[torch.nn.Module]: # pyright: reportOptionalMemberAccess=false if self.helper.args.transformer.activation == "relu": activation = F.relu elif self.helper.args.transformer.activation == "topk": activation = self.topk_activation elif self.helper.args.transformer.activation == "identity": activation = lambda x: x elif self.helper.args.transformer.activation == "sigmoid": activation = torch.sigmoid elif self.helper.args.transformer.activation == "gelu": activation = F.gelu elif self.helper.args.transformer.activation == "softmax": activation = lambda x: F.softmax(x, dim=-1) else: raise ValueError(f"Invalid activation: {self.helper.args.transformer.activation}") base_args = dict( d_model=self.helper.args.state_size, nhead=self.helper.args.transformer.n_heads, dropout=self.helper.args.dropout, activation=activation ) if self.helper.args.transformer.variant not in {"preln_moe", "moe"}: base_args["dim_feedforward"]=int(self.helper.args.state_size * self.helper.args.transformer.ff_multiplier) extra_args = {} if not self.helper.args.transformer.variant.endswith("_gelu") else { "activation": F.gelu, "drop_expand": False } if self.helper.args.transformer.variant in {"preln_relative"}: mklayer = lambda: PrelnRelativeTransformerEncoderLayer( **base_args, **extra_args, test_pos_clamp=self.helper.args.lm.trafo.test_pos_clamp, n_layers=self.helper.args.transformer.encoder_n_layers, head_projection_size=self.helper.args.transformer.head_projection_size,) elif self.helper.args.transformer.variant in {"preln_moeatt"}: mklayer = lambda: MoeAttentionRelativeTransformerEncoderLayer( **base_args, **extra_args, 
moe_att_n_experts=self.helper.args.moe.att.n_experts, n_layers=self.helper.args.transformer.encoder_n_layers, head_projection_size=self.helper.args.transformer.head_projection_size, att_perplexity_reg=self.helper.args.moe.perplexity_reg if self.helper.args.moe.att.perplexity_reg is None else self.helper.args.moe.att.perplexity_reg, expert_dropout=self.helper.args.moe.drop_expert if self.helper.args.moe.att.drop_expert is None else self.helper.args.moe.att.drop_expert, att_selection_mode=self.helper.args.moe.att.selection_mode, preln=self.is_preln(), attention_variant=self.helper.args.moe.att.variant, q_expert=self.helper.args.moe.att.q_expert, k_expert=self.helper.args.moe.att.k_expert, v_expert=self.helper.args.moe.att.v_expert, o_expert=self.helper.args.moe.att.o_expert, norm_qk_score=self.helper.args.moe.att.norm_qk, v_projection_size=self.helper.args.moe.att.v_size, same_sel=self.helper.args.moe.att.same_sel, moe_k=self.helper.args.moe.att.k, qside_n_experts=self.helper.args.moe.att.qside_n_experts, shared_experts=self.helper.args.moe.att.shared_experts, kq_n_experts=self.helper.args.moe.att.kq_n_experts, separate_kq_sel=self.helper.args.moe.att.separate_kq_sel, moa_mode=self.helper.args.moa.mode, cvloss=self.helper.args.moa.cvloss, switchloss=self.helper.args.moa.switchloss, zloss=self.helper.args.moa.zloss, rotate_fraction=self.helper.args.rope.rotate_fraction, rope_base=self.helper.args.rope.base, moeatt_norm_init=self.helper.args.moe.att.norm_init) elif self.helper.args.transformer.variant in {"preln_rope", "rope"}: mklayer = lambda: FastRopeTransformerEncoderLayer( **base_args, **extra_args, n_layers=self.helper.args.transformer.encoder_n_layers, head_projection_size=self.helper.args.transformer.head_projection_size, preln=self.is_preln(), rotate_fraction = self.helper.args.rope.rotate_fraction, rope_base=self.helper.args.rope.base) elif self.helper.args.transformer.variant in {"preln_moe", "moe"}: # def __init__(self, d_model, nhead, n_bins: int, bin_size: int, n_layers: int, dim_feedforward=2048,
mklayer = lambda: RelativeMoeTransformerEncoderLayer(
5
2023-12-13 08:45:02+00:00
24k
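The -moe.att.* flags in the record above configure expert routing for attention. A minimal sketch of the sigmoid top-k selection those defaults describe (n_experts=4, k=2, selection_mode="sigmoid"); the linear scorer and token shapes are illustrative assumptions, not the repo's implementation:

import torch

tokens = torch.randn(5, 16)                    # 5 tokens, d_model=16 (assumed)
selector = torch.nn.Linear(16, 4, bias=False)  # one score per expert (n_experts=4)
scores = torch.sigmoid(selector(tokens))       # selection_mode="sigmoid"
gate, expert_idx = scores.topk(2, dim=-1)      # keep k=2 experts per token
print(expert_idx)                              # chosen experts, shape (5, 2)
print(gate)                                    # their gating weights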
AIFSH/NativeDancer
nativedancer/third_part/detectron2/utils/visualizer.py
[ { "identifier": "MetadataCatalog", "path": "nativedancer/third_part/detectron2/data/catalog.py", "snippet": "class _DatasetCatalog(UserDict):\nclass Metadata(types.SimpleNamespace):\nclass _MetadataCatalog(UserDict):\n def register(self, name, func):\n def get(self, name):\n def list(self) -> L...
import colorsys import logging import math import numpy as np import cv2 import matplotlib as mpl import matplotlib.colors as mplc import matplotlib.figure as mplfigure import pycocotools.mask as mask_util import torch from enum import Enum, unique from matplotlib.backends.backend_agg import FigureCanvasAgg from PIL import Image from ..data import MetadataCatalog from ..structures import BitMasks, Boxes, BoxMode, Keypoints, PolygonMasks, RotatedBoxes from ..utils.file_io import PathManager from .colormap import random_color from panopticapi.utils import rgb2id
17,119
polygon = mpl.patches.Polygon( segment, fill=True, facecolor=mplc.to_rgb(color) + (alpha,), edgecolor=edge_color, linewidth=max(self._default_font_size // 15 * self.output.scale, 1), ) self.output.ax.add_patch(polygon) return self.output """ Internal methods: """ def _jitter(self, color): """ Randomly modifies given color to produce a slightly different color than the color given. Args: color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color picked. The values in the list are in the [0.0, 1.0] range. Returns: jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color after being jittered. The values in the list are in the [0.0, 1.0] range. """ color = mplc.to_rgb(color) vec = np.random.rand(3) # better to do it in another color space vec = vec / np.linalg.norm(vec) * 0.5 res = np.clip(vec + color, 0, 1) return tuple(res) def _create_grayscale_image(self, mask=None): """ Create a grayscale version of the original image. The colors in masked area, if given, will be kept. """ img_bw = self.img.astype("f4").mean(axis=2) img_bw = np.stack([img_bw] * 3, axis=2) if mask is not None: img_bw[mask] = self.img[mask] return img_bw def _change_color_brightness(self, color, brightness_factor): """ Depending on the brightness_factor, gives a lighter or darker color i.e. a color with less or more saturation than the original color. Args: color: color of the polygon. Refer to `matplotlib.colors` for a full list of formats that are accepted. brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of 0 will correspond to no change, a factor in [-1.0, 0) range will result in a darker color and a factor in (0, 1.0] range will result in a lighter color. Returns: modified_color (tuple[double]): a tuple containing the RGB values of the modified color. Each value in the tuple is in the [0.0, 1.0] range. """ assert brightness_factor >= -1.0 and brightness_factor <= 1.0 color = mplc.to_rgb(color) polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color)) modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1]) modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2]) return tuple(np.clip(modified_color, 0.0, 1.0)) def _convert_boxes(self, boxes): """ Convert different format of boxes to an NxB array, where B = 4 or 5 is the box dimension. """ if isinstance(boxes, Boxes) or isinstance(boxes, RotatedBoxes): return boxes.tensor.detach().numpy() else: return np.asarray(boxes) def _convert_masks(self, masks_or_polygons): """ Convert different format of masks or polygons to a tuple of masks and polygons. Returns: list[GenericMask]: """ m = masks_or_polygons if isinstance(m, PolygonMasks): m = m.polygons if isinstance(m, BitMasks): m = m.tensor.numpy() if isinstance(m, torch.Tensor): m = m.numpy() ret = [] for x in m: if isinstance(x, GenericMask): ret.append(x) else: ret.append(GenericMask(x, self.output.height, self.output.width)) return ret def _draw_text_in_mask(self, binary_mask, text, color): """ Find proper places to draw text given a binary mask. """ # TODO sometimes drawn on wrong objects. the heuristics here can improve. 
_num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8) if stats[1:, -1].size == 0: return largest_component_id = np.argmax(stats[1:, -1]) + 1 # draw text on the largest component, as well as other very large components. for cid in range(1, _num_cc): if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH: # median is more stable than centroid # center = centroids[largest_component_id] center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1] self.draw_text(text, center, color=color) def _convert_keypoints(self, keypoints):
# Copyright (c) Facebook, Inc. and its affiliates. logger = logging.getLogger(__name__) __all__ = ["ColorMode", "VisImage", "Visualizer"] _SMALL_OBJECT_AREA_THRESH = 1000 _LARGE_MASK_AREA_THRESH = 120000 _OFF_WHITE = (1.0, 1.0, 240.0 / 255) _BLACK = (0, 0, 0) _RED = (1.0, 0, 0) _KEYPOINT_THRESHOLD = 0.05 @unique class ColorMode(Enum): """ Enum of different color modes to use for instance visualizations. """ IMAGE = 0 """ Picks a random color for every instance and overlay segmentations with low opacity. """ SEGMENTATION = 1 """ Let instances of the same category have similar colors (from metadata.thing_colors), and overlay them with high opacity. This provides more attention on the quality of segmentation. """ IMAGE_BW = 2 """ Same as IMAGE, but convert all areas without masks to gray-scale. Only available for drawing per-instance mask predictions. """ class GenericMask: """ Attribute: polygons (list[ndarray]): list[ndarray]: polygons for this mask. Each ndarray has format [x, y, x, y, ...] mask (ndarray): a binary mask """ def __init__(self, mask_or_polygons, height, width): self._mask = self._polygons = self._has_holes = None self.height = height self.width = width m = mask_or_polygons if isinstance(m, dict): # RLEs assert "counts" in m and "size" in m if isinstance(m["counts"], list): # uncompressed RLEs h, w = m["size"] assert h == height and w == width m = mask_util.frPyObjects(m, h, w) self._mask = mask_util.decode(m)[:, :] return if isinstance(m, list): # list[ndarray] self._polygons = [np.asarray(x).reshape(-1) for x in m] return if isinstance(m, np.ndarray): # assumed to be a binary mask assert m.shape[1] != 2, m.shape assert m.shape == ( height, width, ), f"mask shape: {m.shape}, target dims: {height}, {width}" self._mask = m.astype("uint8") return raise ValueError("GenericMask cannot handle object {} of type '{}'".format(m, type(m))) @property def mask(self): if self._mask is None: self._mask = self.polygons_to_mask(self._polygons) return self._mask @property def polygons(self): if self._polygons is None: self._polygons, self._has_holes = self.mask_to_polygons(self._mask) return self._polygons @property def has_holes(self): if self._has_holes is None: if self._mask is not None: self._polygons, self._has_holes = self.mask_to_polygons(self._mask) else: self._has_holes = False # if original format is polygon, does not have holes return self._has_holes def mask_to_polygons(self, mask): # cv2.RETR_CCOMP flag retrieves all the contours and arranges them to a 2-level # hierarchy. External contours (boundary) of the object are placed in hierarchy-1. # Internal contours (holes) are placed in hierarchy-2. # cv2.CHAIN_APPROX_NONE flag gets vertices of polygons from contours. mask = np.ascontiguousarray(mask) # some versions of cv2 does not support incontiguous arr res = cv2.findContours(mask.astype("uint8"), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) hierarchy = res[-1] if hierarchy is None: # empty mask return [], False has_holes = (hierarchy.reshape(-1, 4)[:, 3] >= 0).sum() > 0 res = res[-2] res = [x.flatten() for x in res] # These coordinates from OpenCV are integers in range [0, W-1 or H-1]. # We add 0.5 to turn them into real-value coordinate space. A better solution # would be to first +0.5 and then dilate the returned polygon by 0.5. 
res = [x + 0.5 for x in res if len(x) >= 6] return res, has_holes def polygons_to_mask(self, polygons): rle = mask_util.frPyObjects(polygons, self.height, self.width) rle = mask_util.merge(rle) return mask_util.decode(rle)[:, :] def area(self): return self.mask.sum() def bbox(self): p = mask_util.frPyObjects(self.polygons, self.height, self.width) p = mask_util.merge(p) bbox = mask_util.toBbox(p) bbox[2] += bbox[0] bbox[3] += bbox[1] return bbox class _PanopticPrediction: """ Unify different panoptic annotation/prediction formats """ def __init__(self, panoptic_seg, segments_info, metadata=None): if segments_info is None: assert metadata is not None # If "segments_info" is None, we assume "panoptic_img" is a # H*W int32 image storing the panoptic_id in the format of # category_id * label_divisor + instance_id. We reserve -1 for # VOID label. label_divisor = metadata.label_divisor segments_info = [] for panoptic_label in np.unique(panoptic_seg.numpy()): if panoptic_label == -1: # VOID region. continue pred_class = panoptic_label // label_divisor isthing = pred_class in metadata.thing_dataset_id_to_contiguous_id.values() segments_info.append( { "id": int(panoptic_label), "category_id": int(pred_class), "isthing": bool(isthing), } ) del metadata self._seg = panoptic_seg self._sinfo = {s["id"]: s for s in segments_info} # seg id -> seg info segment_ids, areas = torch.unique(panoptic_seg, sorted=True, return_counts=True) areas = areas.numpy() sorted_idxs = np.argsort(-areas) self._seg_ids, self._seg_areas = segment_ids[sorted_idxs], areas[sorted_idxs] self._seg_ids = self._seg_ids.tolist() for sid, area in zip(self._seg_ids, self._seg_areas): if sid in self._sinfo: self._sinfo[sid]["area"] = float(area) def non_empty_mask(self): """ Returns: (H, W) array, a mask for all pixels that have a prediction """ empty_ids = [] for id in self._seg_ids: if id not in self._sinfo: empty_ids.append(id) if len(empty_ids) == 0: return np.zeros(self._seg.shape, dtype=np.uint8) assert ( len(empty_ids) == 1 ), ">1 ids corresponds to no labels. This is currently not supported" return (self._seg != empty_ids[0]).numpy().astype(bool) def semantic_masks(self): for sid in self._seg_ids: sinfo = self._sinfo.get(sid) if sinfo is None or sinfo["isthing"]: # Some pixels (e.g. id 0 in PanopticFPN) have no instance or semantic predictions. continue yield (self._seg == sid).numpy().astype(bool), sinfo def instance_masks(self): for sid in self._seg_ids: sinfo = self._sinfo.get(sid) if sinfo is None or not sinfo["isthing"]: continue mask = (self._seg == sid).numpy().astype(bool) if mask.sum() > 0: yield mask, sinfo def _create_text_labels(classes, scores, class_names, is_crowd=None): """ Args: classes (list[int] or None): scores (list[float] or None): class_names (list[str] or None): is_crowd (list[bool] or None): Returns: list[str] or None """ labels = None if classes is not None: if class_names is not None and len(class_names) > 0: labels = [class_names[i] for i in classes] else: labels = [str(i) for i in classes] if scores is not None: if labels is None: labels = ["{:.0f}%".format(s * 100) for s in scores] else: labels = ["{} {:.0f}%".format(l, s * 100) for l, s in zip(labels, scores)] if labels is not None and is_crowd is not None: labels = [l + ("|crowd" if crowd else "") for l, crowd in zip(labels, is_crowd)] return labels class VisImage: def __init__(self, img, scale=1.0): """ Args: img (ndarray): an RGB image of shape (H, W, 3) in range [0, 255]. 
scale (float): scale the input image """ self.img = img self.scale = scale self.width, self.height = img.shape[1], img.shape[0] self._setup_figure(img) def _setup_figure(self, img): """ Args: Same as in :meth:`__init__()`. Returns: fig (matplotlib.pyplot.figure): top level container for all the image plot elements. ax (matplotlib.pyplot.Axes): contains figure elements and sets the coordinate system. """ fig = mplfigure.Figure(frameon=False) self.dpi = fig.get_dpi() # add a small 1e-2 to avoid precision lost due to matplotlib's truncation # (https://github.com/matplotlib/matplotlib/issues/15363) fig.set_size_inches( (self.width * self.scale + 1e-2) / self.dpi, (self.height * self.scale + 1e-2) / self.dpi, ) self.canvas = FigureCanvasAgg(fig) # self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig) ax = fig.add_axes([0.0, 0.0, 1.0, 1.0]) ax.axis("off") self.fig = fig self.ax = ax self.reset_image(img) def reset_image(self, img): """ Args: img: same as in __init__ """ img = img.astype("uint8") self.ax.imshow(img, extent=(0, self.width, self.height, 0), interpolation="nearest") def save(self, filepath): """ Args: filepath (str): a string that contains the absolute path, including the file name, where the visualized image will be saved. """ self.fig.savefig(filepath) def get_image(self): """ Returns: ndarray: the visualized image of shape (H, W, 3) (RGB) in uint8 type. The shape is scaled w.r.t the input image using the given `scale` argument. """ canvas = self.canvas s, (width, height) = canvas.print_to_buffer() # buf = io.BytesIO() # works for cairo backend # canvas.print_rgba(buf) # width, height = self.width, self.height # s = buf.getvalue() buffer = np.frombuffer(s, dtype="uint8") img_rgba = buffer.reshape(height, width, 4) rgb, alpha = np.split(img_rgba, [3], axis=2) return rgb.astype("uint8") class Visualizer: """ Visualizer that draws data about detection/segmentation on images. It contains methods like `draw_{text,box,circle,line,binary_mask,polygon}` that draw primitive objects to images, as well as high-level wrappers like `draw_{instance_predictions,sem_seg,panoptic_seg_predictions,dataset_dict}` that draw composite data in some pre-defined style. Note that the exact visualization style for the high-level wrappers are subject to change. Style such as color, opacity, label contents, visibility of labels, or even the visibility of objects themselves (e.g. when the object is too small) may change according to different heuristics, as long as the results still look visually reasonable. To obtain a consistent style, you can implement custom drawing functions with the abovementioned primitive methods instead. If you need more customized visualization styles, you can process the data yourself following their format documented in tutorials (:doc:`/tutorials/models`, :doc:`/tutorials/datasets`). This class does not intend to satisfy everyone's preference on drawing styles. This visualizer focuses on high rendering quality rather than performance. It is not designed to be used for real-time applications. """ # TODO implement a fast, rasterized version using OpenCV def __init__(self, img_rgb, metadata=None, scale=1.0, instance_mode=ColorMode.IMAGE): """ Args: img_rgb: a numpy array of shape (H, W, C), where H and W correspond to the height and width of the image respectively. C is the number of color channels. The image is required to be in RGB format since that is a requirement of the Matplotlib library. The image is also expected to be in the range [0, 255]. 
metadata (Metadata): dataset metadata (e.g. class names and colors) instance_mode (ColorMode): defines one of the pre-defined style for drawing instances on an image. """ self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8) if metadata is None: metadata = MetadataCatalog.get("__nonexist__") self.metadata = metadata self.output = VisImage(self.img, scale=scale) self.cpu_device = torch.device("cpu") # too small texts are useless, therefore clamp to 9 self._default_font_size = max( np.sqrt(self.output.height * self.output.width) // 90, 10 // scale ) self._instance_mode = instance_mode self.keypoint_threshold = _KEYPOINT_THRESHOLD def draw_instance_predictions(self, predictions): """ Draw instance-level prediction results on an image. Args: predictions (Instances): the output of an instance detection/segmentation model. Following fields will be used to draw: "pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle"). Returns: output (VisImage): image object with visualizations. """ boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None scores = predictions.scores if predictions.has("scores") else None classes = predictions.pred_classes.tolist() if predictions.has("pred_classes") else None labels = _create_text_labels(classes, scores, self.metadata.get("thing_classes", None)) keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None if predictions.has("pred_masks"): masks = np.asarray(predictions.pred_masks) masks = [GenericMask(x, self.output.height, self.output.width) for x in masks] else: masks = None if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"): colors = [ self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes ] alpha = 0.8 else: colors = None alpha = 0.5 if self._instance_mode == ColorMode.IMAGE_BW: self.output.reset_image( self._create_grayscale_image( (predictions.pred_masks.any(dim=0) > 0).numpy() if predictions.has("pred_masks") else None ) ) alpha = 0.3 self.overlay_instances( masks=masks, boxes=boxes, labels=labels, keypoints=keypoints, assigned_colors=colors, alpha=alpha, ) return self.output def draw_sem_seg(self, sem_seg, area_threshold=None, alpha=0.8): """ Draw semantic segmentation predictions/labels. Args: sem_seg (Tensor or ndarray): the segmentation of shape (H, W). Each value is the integer label of the pixel. area_threshold (int): segments with less than `area_threshold` are not drawn. alpha (float): the larger it is, the more opaque the segmentations are. Returns: output (VisImage): image object with visualizations. """ if isinstance(sem_seg, torch.Tensor): sem_seg = sem_seg.numpy() labels, areas = np.unique(sem_seg, return_counts=True) sorted_idxs = np.argsort(-areas).tolist() labels = labels[sorted_idxs] for label in filter(lambda l: l < len(self.metadata.stuff_classes), labels): try: mask_color = [x / 255 for x in self.metadata.stuff_colors[label]] except (AttributeError, IndexError): mask_color = None binary_mask = (sem_seg == label).astype(np.uint8) text = self.metadata.stuff_classes[label] self.draw_binary_mask( binary_mask, color=mask_color, edge_color=_OFF_WHITE, text=text, alpha=alpha, area_threshold=area_threshold, ) return self.output def draw_panoptic_seg(self, panoptic_seg, segments_info, area_threshold=None, alpha=0.7): """ Draw panoptic prediction annotations or results. Args: panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment. 
segments_info (list[dict] or None): Describe each segment in `panoptic_seg`. If it is a ``list[dict]``, each dict contains keys "id", "category_id". If None, category id of each pixel is computed by ``pixel // metadata.label_divisor``. area_threshold (int): stuff segments with less than `area_threshold` are not drawn. Returns: output (VisImage): image object with visualizations. """ pred = _PanopticPrediction(panoptic_seg, segments_info, self.metadata) if self._instance_mode == ColorMode.IMAGE_BW: self.output.reset_image(self._create_grayscale_image(pred.non_empty_mask())) # draw mask for all semantic segments first i.e. "stuff" for mask, sinfo in pred.semantic_masks(): category_idx = sinfo["category_id"] try: mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]] except AttributeError: mask_color = None text = self.metadata.stuff_classes[category_idx] self.draw_binary_mask( mask, color=mask_color, edge_color=_OFF_WHITE, text=text, alpha=alpha, area_threshold=area_threshold, ) # draw mask for all instances second all_instances = list(pred.instance_masks()) if len(all_instances) == 0: return self.output masks, sinfo = list(zip(*all_instances)) category_ids = [x["category_id"] for x in sinfo] try: scores = [x["score"] for x in sinfo] except KeyError: scores = None labels = _create_text_labels( category_ids, scores, self.metadata.thing_classes, [x.get("iscrowd", 0) for x in sinfo] ) try: colors = [ self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in category_ids ] except AttributeError: colors = None self.overlay_instances(masks=masks, labels=labels, assigned_colors=colors, alpha=alpha) return self.output draw_panoptic_seg_predictions = draw_panoptic_seg # backward compatibility def draw_dataset_dict(self, dic): """ Draw annotations/segmentations in Detectron2 Dataset format. Args: dic (dict): annotation/segmentation data of one image, in Detectron2 Dataset format. Returns: output (VisImage): image object with visualizations. 
""" annos = dic.get("annotations", None) if annos: if "segmentation" in annos[0]: masks = [x["segmentation"] for x in annos] else: masks = None if "keypoints" in annos[0]: keypts = [x["keypoints"] for x in annos] keypts = np.array(keypts).reshape(len(annos), -1, 3) else: keypts = None boxes = [ BoxMode.convert(x["bbox"], x["bbox_mode"], BoxMode.XYXY_ABS) if len(x["bbox"]) == 4 else x["bbox"] for x in annos ] colors = None category_ids = [x["category_id"] for x in annos] if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"): colors = [ self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in category_ids ] names = self.metadata.get("thing_classes", None) labels = _create_text_labels( category_ids, scores=None, class_names=names, is_crowd=[x.get("iscrowd", 0) for x in annos], ) self.overlay_instances( labels=labels, boxes=boxes, masks=masks, keypoints=keypts, assigned_colors=colors ) sem_seg = dic.get("sem_seg", None) if sem_seg is None and "sem_seg_file_name" in dic: with PathManager.open(dic["sem_seg_file_name"], "rb") as f: sem_seg = Image.open(f) sem_seg = np.asarray(sem_seg, dtype="uint8") if sem_seg is not None: self.draw_sem_seg(sem_seg, area_threshold=0, alpha=0.5) pan_seg = dic.get("pan_seg", None) if pan_seg is None and "pan_seg_file_name" in dic: with PathManager.open(dic["pan_seg_file_name"], "rb") as f: pan_seg = Image.open(f) pan_seg = np.asarray(pan_seg) pan_seg = rgb2id(pan_seg) if pan_seg is not None: segments_info = dic["segments_info"] pan_seg = torch.tensor(pan_seg) self.draw_panoptic_seg(pan_seg, segments_info, area_threshold=0, alpha=0.5) return self.output def overlay_instances( self, *, boxes=None, labels=None, masks=None, keypoints=None, assigned_colors=None, alpha=0.5, ): """ Args: boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`, or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image, or a :class:`RotatedBoxes`, or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format for the N objects in a single image, labels (list[str]): the text to be displayed for each instance. masks (masks-like object): Supported types are: * :class:`detectron2.structures.PolygonMasks`, :class:`detectron2.structures.BitMasks`. * list[list[ndarray]]: contains the segmentation masks for all objects in one image. The first level of the list corresponds to individual instances. The second level to all the polygon that compose the instance, and the third level to the polygon coordinates. The third level should have the format of [x0, y0, x1, y1, ..., xn, yn] (n >= 3). * list[ndarray]: each ndarray is a binary mask of shape (H, W). * list[dict]: each dict is a COCO-style RLE. keypoints (Keypoint or array like): an array-like object of shape (N, K, 3), where the N is the number of instances and K is the number of keypoints. The last dimension corresponds to (x, y, visibility or score). assigned_colors (list[matplotlib.colors]): a list of colors, where each color corresponds to each mask or box in the image. Refer to 'matplotlib.colors' for full list of formats that the colors are accepted in. Returns: output (VisImage): image object with visualizations. 
""" num_instances = 0 if boxes is not None: boxes = self._convert_boxes(boxes) num_instances = len(boxes) if masks is not None: masks = self._convert_masks(masks) if num_instances: assert len(masks) == num_instances else: num_instances = len(masks) if keypoints is not None: if num_instances: assert len(keypoints) == num_instances else: num_instances = len(keypoints) keypoints = self._convert_keypoints(keypoints) if labels is not None: assert len(labels) == num_instances if assigned_colors is None: assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)] if num_instances == 0: return self.output if boxes is not None and boxes.shape[1] == 5: return self.overlay_rotated_instances( boxes=boxes, labels=labels, assigned_colors=assigned_colors ) # Display in largest to smallest order to reduce occlusion. areas = None if boxes is not None: areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1) elif masks is not None: areas = np.asarray([x.area() for x in masks]) if areas is not None: sorted_idxs = np.argsort(-areas).tolist() # Re-order overlapped instances in descending order. boxes = boxes[sorted_idxs] if boxes is not None else None labels = [labels[k] for k in sorted_idxs] if labels is not None else None masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None assigned_colors = [assigned_colors[idx] for idx in sorted_idxs] keypoints = keypoints[sorted_idxs] if keypoints is not None else None for i in range(num_instances): color = assigned_colors[i] if boxes is not None: self.draw_box(boxes[i], edge_color=color) if masks is not None: for segment in masks[i].polygons: self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha) if labels is not None: # first get a box if boxes is not None: x0, y0, x1, y1 = boxes[i] text_pos = (x0, y0) # if drawing boxes, put text on the box corner. horiz_align = "left" elif masks is not None: # skip small mask without polygon if len(masks[i].polygons) == 0: continue x0, y0, x1, y1 = masks[i].bbox() # draw text in the center (defined by median) when box is not drawn # median is less sensitive to outliers. text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1] horiz_align = "center" else: continue # drawing the box confidence for keypoints isn't very useful. # for small objects, draw text at the side to avoid occlusion instance_area = (y1 - y0) * (x1 - x0) if ( instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale or y1 - y0 < 40 * self.output.scale ): if y1 >= self.output.height - 5: text_pos = (x1, y0) else: text_pos = (x0, y1) height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width) lighter_color = self._change_color_brightness(color, brightness_factor=0.7) font_size = ( np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size ) self.draw_text( labels[i], text_pos, color=lighter_color, horizontal_alignment=horiz_align, font_size=font_size, ) # draw keypoints if keypoints is not None: for keypoints_per_instance in keypoints: self.draw_and_connect_keypoints(keypoints_per_instance) return self.output def overlay_rotated_instances(self, boxes=None, labels=None, assigned_colors=None): """ Args: boxes (ndarray): an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format for the N objects in a single image. labels (list[str]): the text to be displayed for each instance. assigned_colors (list[matplotlib.colors]): a list of colors, where each color corresponds to each mask or box in the image. 
Refer to 'matplotlib.colors' for full list of formats that the colors are accepted in. Returns: output (VisImage): image object with visualizations. """ num_instances = len(boxes) if assigned_colors is None: assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)] if num_instances == 0: return self.output # Display in largest to smallest order to reduce occlusion. if boxes is not None: areas = boxes[:, 2] * boxes[:, 3] sorted_idxs = np.argsort(-areas).tolist() # Re-order overlapped instances in descending order. boxes = boxes[sorted_idxs] labels = [labels[k] for k in sorted_idxs] if labels is not None else None colors = [assigned_colors[idx] for idx in sorted_idxs] for i in range(num_instances): self.draw_rotated_box_with_label( boxes[i], edge_color=colors[i], label=labels[i] if labels is not None else None ) return self.output def draw_and_connect_keypoints(self, keypoints): """ Draws keypoints of an instance and follows the rules for keypoint connections to draw lines between appropriate keypoints. This follows color heuristics for line color. Args: keypoints (Tensor): a tensor of shape (K, 3), where K is the number of keypoints and the last dimension corresponds to (x, y, probability). Returns: output (VisImage): image object with visualizations. """ visible = {} keypoint_names = self.metadata.get("keypoint_names") for idx, keypoint in enumerate(keypoints): # draw keypoint x, y, prob = keypoint if prob > self.keypoint_threshold: self.draw_circle((x, y), color=_RED) if keypoint_names: keypoint_name = keypoint_names[idx] visible[keypoint_name] = (x, y) if self.metadata.get("keypoint_connection_rules"): for kp0, kp1, color in self.metadata.keypoint_connection_rules: if kp0 in visible and kp1 in visible: x0, y0 = visible[kp0] x1, y1 = visible[kp1] color = tuple(x / 255.0 for x in color) self.draw_line([x0, x1], [y0, y1], color=color) # draw lines from nose to mid-shoulder and mid-shoulder to mid-hip # Note that this strategy is specific to person keypoints. # For other keypoints, it should just do nothing try: ls_x, ls_y = visible["left_shoulder"] rs_x, rs_y = visible["right_shoulder"] mid_shoulder_x, mid_shoulder_y = (ls_x + rs_x) / 2, (ls_y + rs_y) / 2 except KeyError: pass else: # draw line from nose to mid-shoulder nose_x, nose_y = visible.get("nose", (None, None)) if nose_x is not None: self.draw_line([nose_x, mid_shoulder_x], [nose_y, mid_shoulder_y], color=_RED) try: # draw line from mid-shoulder to mid-hip lh_x, lh_y = visible["left_hip"] rh_x, rh_y = visible["right_hip"] except KeyError: pass else: mid_hip_x, mid_hip_y = (lh_x + rh_x) / 2, (lh_y + rh_y) / 2 self.draw_line([mid_hip_x, mid_shoulder_x], [mid_hip_y, mid_shoulder_y], color=_RED) return self.output """ Primitive drawing functions: """ def draw_text( self, text, position, *, font_size=None, color="g", horizontal_alignment="center", rotation=0, ): """ Args: text (str): class label position (tuple): a tuple of the x and y coordinates to place text on image. font_size (int, optional): font of the text. If not provided, a font size proportional to the image width is calculated and used. color: color of the text. Refer to `matplotlib.colors` for full list of formats that are accepted. horizontal_alignment (str): see `matplotlib.text.Text` rotation: rotation angle in degrees CCW Returns: output (VisImage): image object with text drawn. 
""" if not font_size: font_size = self._default_font_size # since the text background is dark, we don't want the text to be dark color = np.maximum(list(mplc.to_rgb(color)), 0.2) color[np.argmax(color)] = max(0.8, np.max(color)) x, y = position self.output.ax.text( x, y, text, size=font_size * self.output.scale, family="sans-serif", bbox={"facecolor": "black", "alpha": 0.8, "pad": 0.7, "edgecolor": "none"}, verticalalignment="top", horizontalalignment=horizontal_alignment, color=color, zorder=10, rotation=rotation, ) return self.output def draw_box(self, box_coord, alpha=0.5, edge_color="g", line_style="-"): """ Args: box_coord (tuple): a tuple containing x0, y0, x1, y1 coordinates, where x0 and y0 are the coordinates of the image's top left corner. x1 and y1 are the coordinates of the image's bottom right corner. alpha (float): blending efficient. Smaller values lead to more transparent masks. edge_color: color of the outline of the box. Refer to `matplotlib.colors` for full list of formats that are accepted. line_style (string): the string to use to create the outline of the boxes. Returns: output (VisImage): image object with box drawn. """ x0, y0, x1, y1 = box_coord width = x1 - x0 height = y1 - y0 linewidth = max(self._default_font_size / 4, 1) self.output.ax.add_patch( mpl.patches.Rectangle( (x0, y0), width, height, fill=False, edgecolor=edge_color, linewidth=linewidth * self.output.scale, alpha=alpha, linestyle=line_style, ) ) return self.output def draw_rotated_box_with_label( self, rotated_box, alpha=0.5, edge_color="g", line_style="-", label=None ): """ Draw a rotated box with label on its top-left corner. Args: rotated_box (tuple): a tuple containing (cnt_x, cnt_y, w, h, angle), where cnt_x and cnt_y are the center coordinates of the box. w and h are the width and height of the box. angle represents how many degrees the box is rotated CCW with regard to the 0-degree box. alpha (float): blending efficient. Smaller values lead to more transparent masks. edge_color: color of the outline of the box. Refer to `matplotlib.colors` for full list of formats that are accepted. line_style (string): the string to use to create the outline of the boxes. label (string): label for rotated box. It will not be rendered when set to None. Returns: output (VisImage): image object with box drawn. 
""" cnt_x, cnt_y, w, h, angle = rotated_box area = w * h # use thinner lines when the box is small linewidth = self._default_font_size / ( 6 if area < _SMALL_OBJECT_AREA_THRESH * self.output.scale else 3 ) theta = angle * math.pi / 180.0 c = math.cos(theta) s = math.sin(theta) rect = [(-w / 2, h / 2), (-w / 2, -h / 2), (w / 2, -h / 2), (w / 2, h / 2)] # x: left->right ; y: top->down rotated_rect = [(s * yy + c * xx + cnt_x, c * yy - s * xx + cnt_y) for (xx, yy) in rect] for k in range(4): j = (k + 1) % 4 self.draw_line( [rotated_rect[k][0], rotated_rect[j][0]], [rotated_rect[k][1], rotated_rect[j][1]], color=edge_color, linestyle="--" if k == 1 else line_style, linewidth=linewidth, ) if label is not None: text_pos = rotated_rect[1] # topleft corner height_ratio = h / np.sqrt(self.output.height * self.output.width) label_color = self._change_color_brightness(edge_color, brightness_factor=0.7) font_size = ( np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size ) self.draw_text(label, text_pos, color=label_color, font_size=font_size, rotation=angle) return self.output def draw_circle(self, circle_coord, color, radius=3): """ Args: circle_coord (list(int) or tuple(int)): contains the x and y coordinates of the center of the circle. color: color of the polygon. Refer to `matplotlib.colors` for a full list of formats that are accepted. radius (int): radius of the circle. Returns: output (VisImage): image object with box drawn. """ x, y = circle_coord self.output.ax.add_patch( mpl.patches.Circle(circle_coord, radius=radius, fill=True, color=color) ) return self.output def draw_line(self, x_data, y_data, color, linestyle="-", linewidth=None): """ Args: x_data (list[int]): a list containing x values of all the points being drawn. Length of list should match the length of y_data. y_data (list[int]): a list containing y values of all the points being drawn. Length of list should match the length of x_data. color: color of the line. Refer to `matplotlib.colors` for a full list of formats that are accepted. linestyle: style of the line. Refer to `matplotlib.lines.Line2D` for a full list of formats that are accepted. linewidth (float or None): width of the line. When it's None, a default value will be computed and used. Returns: output (VisImage): image object with line drawn. """ if linewidth is None: linewidth = self._default_font_size / 3 linewidth = max(linewidth, 1) self.output.ax.add_line( mpl.lines.Line2D( x_data, y_data, linewidth=linewidth * self.output.scale, color=color, linestyle=linestyle, ) ) return self.output def draw_binary_mask( self, binary_mask, color=None, *, edge_color=None, text=None, alpha=0.5, area_threshold=10 ): """ Args: binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and W is the image width. Each value in the array is either a 0 or 1 value of uint8 type. color: color of the mask. Refer to `matplotlib.colors` for a full list of formats that are accepted. If None, will pick a random color. edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a full list of formats that are accepted. text (str): if None, will be drawn on the object alpha (float): blending efficient. Smaller values lead to more transparent masks. area_threshold (float): a connected component smaller than this area will not be shown. Returns: output (VisImage): image object with mask drawn. 
""" if color is None: color = random_color(rgb=True, maximum=1) color = mplc.to_rgb(color) has_valid_segment = False binary_mask = binary_mask.astype("uint8") # opencv needs uint8 mask = GenericMask(binary_mask, self.output.height, self.output.width) shape2d = (binary_mask.shape[0], binary_mask.shape[1]) if not mask.has_holes: # draw polygons for regular masks for segment in mask.polygons: area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1])) if area < (area_threshold or 0): continue has_valid_segment = True segment = segment.reshape(-1, 2) self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha) else: # TODO: Use Path/PathPatch to draw vector graphics: # https://stackoverflow.com/questions/8919719/how-to-plot-a-complex-polygon rgba = np.zeros(shape2d + (4,), dtype="float32") rgba[:, :, :3] = color rgba[:, :, 3] = (mask.mask == 1).astype("float32") * alpha has_valid_segment = True self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0)) if text is not None and has_valid_segment: lighter_color = self._change_color_brightness(color, brightness_factor=0.7) self._draw_text_in_mask(binary_mask, text, lighter_color) return self.output def draw_soft_mask(self, soft_mask, color=None, *, text=None, alpha=0.5): """ Args: soft_mask (ndarray): float array of shape (H, W), each value in [0, 1]. color: color of the mask. Refer to `matplotlib.colors` for a full list of formats that are accepted. If None, will pick a random color. text (str): if None, will be drawn on the object alpha (float): blending efficient. Smaller values lead to more transparent masks. Returns: output (VisImage): image object with mask drawn. """ if color is None: color = random_color(rgb=True, maximum=1) color = mplc.to_rgb(color) shape2d = (soft_mask.shape[0], soft_mask.shape[1]) rgba = np.zeros(shape2d + (4,), dtype="float32") rgba[:, :, :3] = color rgba[:, :, 3] = soft_mask * alpha self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0)) if text is not None: lighter_color = self._change_color_brightness(color, brightness_factor=0.7) binary_mask = (soft_mask > 0.5).astype("uint8") self._draw_text_in_mask(binary_mask, text, lighter_color) return self.output def draw_polygon(self, segment, color, edge_color=None, alpha=0.5): """ Args: segment: numpy array of shape Nx2, containing all the points in the polygon. color: color of the polygon. Refer to `matplotlib.colors` for a full list of formats that are accepted. edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a full list of formats that are accepted. If not provided, a darker shade of the polygon color will be used instead. alpha (float): blending efficient. Smaller values lead to more transparent masks. Returns: output (VisImage): image object with polygon drawn. """ if edge_color is None: # make edge color darker than the polygon color if alpha > 0.8: edge_color = self._change_color_brightness(color, brightness_factor=-0.7) else: edge_color = color edge_color = mplc.to_rgb(edge_color) + (1,) polygon = mpl.patches.Polygon( segment, fill=True, facecolor=mplc.to_rgb(color) + (alpha,), edgecolor=edge_color, linewidth=max(self._default_font_size // 15 * self.output.scale, 1), ) self.output.ax.add_patch(polygon) return self.output """ Internal methods: """ def _jitter(self, color): """ Randomly modifies given color to produce a slightly different color than the color given. 
Args: color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color picked. The values in the list are in the [0.0, 1.0] range. Returns: jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color after being jittered. The values in the list are in the [0.0, 1.0] range. """ color = mplc.to_rgb(color) vec = np.random.rand(3) # better to do it in another color space vec = vec / np.linalg.norm(vec) * 0.5 res = np.clip(vec + color, 0, 1) return tuple(res) def _create_grayscale_image(self, mask=None): """ Create a grayscale version of the original image. The colors in masked area, if given, will be kept. """ img_bw = self.img.astype("f4").mean(axis=2) img_bw = np.stack([img_bw] * 3, axis=2) if mask is not None: img_bw[mask] = self.img[mask] return img_bw def _change_color_brightness(self, color, brightness_factor): """ Depending on the brightness_factor, gives a lighter or darker color i.e. a color with less or more saturation than the original color. Args: color: color of the polygon. Refer to `matplotlib.colors` for a full list of formats that are accepted. brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of 0 will correspond to no change, a factor in [-1.0, 0) range will result in a darker color and a factor in (0, 1.0] range will result in a lighter color. Returns: modified_color (tuple[double]): a tuple containing the RGB values of the modified color. Each value in the tuple is in the [0.0, 1.0] range. """ assert brightness_factor >= -1.0 and brightness_factor <= 1.0 color = mplc.to_rgb(color) polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color)) modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1]) modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2]) return tuple(np.clip(modified_color, 0.0, 1.0)) def _convert_boxes(self, boxes): """ Convert different format of boxes to an NxB array, where B = 4 or 5 is the box dimension. """ if isinstance(boxes, Boxes) or isinstance(boxes, RotatedBoxes): return boxes.tensor.detach().numpy() else: return np.asarray(boxes) def _convert_masks(self, masks_or_polygons): """ Convert different format of masks or polygons to a tuple of masks and polygons. Returns: list[GenericMask]: """ m = masks_or_polygons if isinstance(m, PolygonMasks): m = m.polygons if isinstance(m, BitMasks): m = m.tensor.numpy() if isinstance(m, torch.Tensor): m = m.numpy() ret = [] for x in m: if isinstance(x, GenericMask): ret.append(x) else: ret.append(GenericMask(x, self.output.height, self.output.width)) return ret def _draw_text_in_mask(self, binary_mask, text, color): """ Find proper places to draw text given a binary mask. """ # TODO sometimes drawn on wrong objects. the heuristics here can improve. _num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8) if stats[1:, -1].size == 0: return largest_component_id = np.argmax(stats[1:, -1]) + 1 # draw text on the largest component, as well as other very large components. for cid in range(1, _num_cc): if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH: # median is more stable than centroid # center = centroids[largest_component_id] center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1] self.draw_text(text, center, color=color) def _convert_keypoints(self, keypoints):
if isinstance(keypoints, Keypoints):
3
2023-12-10 20:14:00+00:00
24k
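The `_change_color_brightness` helper in the record above converts a color to HLS space and scales only the lightness channel, which is how the visualizer derives lighter label-text colors and darker polygon-edge colors from the same base hue. A minimal standalone sketch of that idea (illustrative helper assuming only matplotlib and the standard library; not detectron2's API):

import colorsys
import matplotlib.colors as mplc
import numpy as np

def change_brightness(color, factor):
    # factor in [-1.0, 1.0]: negative darkens, positive lightens
    r, g, b = mplc.to_rgb(color)
    h, l, s = colorsys.rgb_to_hls(r, g, b)
    l = float(np.clip(l + factor * l, 0.0, 1.0))  # scale lightness, keep hue/saturation
    return colorsys.hls_to_rgb(h, l, s)

print(change_brightness("g", 0.7))   # lighter green, as used for label text
print(change_brightness("g", -0.7))  # darker green, as used for polygon edges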
mkang315/ASF-YOLO
segment/val.py
[ { "identifier": "DetectMultiBackend", "path": "models/common.py", "snippet": "class DetectMultiBackend(nn.Module):\n # YOLOv5 MultiBackend class for python inference on various backends\n def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=Tr...
import argparse import json import os import sys import numpy as np import torch import torch.nn.functional as F import time from multiprocessing.pool import ThreadPool from pathlib import Path from tqdm import tqdm from models.common import DetectMultiBackend from models.yolo import SegmentationModel from utils.callbacks import Callbacks from utils.general import (LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size, check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args, scale_boxes, xywh2xyxy, xyxy2xywh) from utils.metrics import ConfusionMatrix, box_iou from utils.plots import output_to_target, plot_val_study from utils.segment.dataloaders import create_dataloader from utils.segment.general import mask_iou, process_mask, process_mask_upsample, scale_image from utils.segment.metrics import Metrics, ap_per_class_box_and_mask from utils.segment.plots import plot_images_and_masks from utils.torch_utils import de_parallel, select_device, smart_inference_mode from pycocotools.mask import encode from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval
21,426
labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels correct_bboxes = process_batch(predn, labelsn, iouv) correct_masks = process_batch(predn, labelsn, iouv, pred_masks, gt_masks, overlap=overlap, masks=True) if plots: confusion_matrix.process_batch(predn, labelsn) stats.append((correct_masks, correct_bboxes, pred[:, 4], pred[:, 5], labels[:, 0])) # (conf, pcls, tcls) pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8) if plots and batch_i < 3: plot_masks.append(pred_masks[:15].cpu()) # filter top 15 to plot # Save/log if save_txt: save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt') if save_json: pred_masks = scale_image(im[si].shape[1:], pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(), shape, shapes[si][1]) save_one_json(predn, jdict, path, class_map, pred_masks) # append to COCO-JSON dictionary # callbacks.run('on_val_image_end', pred, predn, path, names, im[si]) # Plot images if plots and batch_i < 3: if len(plot_masks): plot_masks = torch.cat(plot_masks, dim=0) plot_images_and_masks(im, targets, masks, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names) plot_images_and_masks(im, output_to_target(preds, max_det=15), plot_masks, paths, save_dir / f'val_batch{batch_i}_pred.jpg', names) # pred # callbacks.run('on_val_batch_end') # Compute metrics stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)] # to numpy if len(stats) and stats[0].any(): results = ap_per_class_box_and_mask(*stats, plot=plots, save_dir=save_dir, names=names) metrics.update(results) nt = np.bincount(stats[4].astype(int), minlength=nc) # number of targets per class # Print results pf = '%22s' + '%11i' * 2 + '%11.3g' * 8 # print format LOGGER.info(pf % ("all", seen, nt.sum(), *metrics.mean_results())) if nt.sum() == 0: LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels') # Print results per class if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats): for i, c in enumerate(metrics.ap_class_index): LOGGER.info(pf % (names[c], seen, nt[c], *metrics.class_result(i))) # Print speeds t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image if not training: shape = (batch_size, 3, imgsz, imgsz) LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t) # Plots if plots: confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) # callbacks.run('on_val_end') mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask = metrics.mean_results() # Save JSON if save_json and len(jdict): w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json pred_json = str(save_dir / f"{w}_predictions.json") # predictions json LOGGER.info(f'\nEvaluating pycocotools mAP... 
saving {pred_json}...') with open(pred_json, 'w') as f: json.dump(jdict, f) try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb anno = COCO(anno_json) # init annotations api pred = anno.loadRes(pred_json) # init predictions api results = [] for eval in COCOeval(anno, pred, 'bbox'), COCOeval(anno, pred, 'segm'): if is_coco: eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files] # img ID to evaluate eval.evaluate() eval.accumulate() eval.summarize() results.extend(eval.stats[:2]) # update results (mAP@0.5:0.95, mAP@0.5) map_bbox, map50_bbox, map_mask, map50_mask = results except Exception as e: LOGGER.info(f'pycocotools unable to run: {e}') # Return results model.float() # for training if not training: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") final_metric = mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask return (*final_metric, *(loss.cpu() / len(dataloader)).tolist()), metrics.get_maps(nc), t def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--data', type=str, default=ROOT / 'data/BCC.yaml', help='dataset.yaml path') parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'runs_2/train-seg/base/weights/best.pt', help='model path(s)') parser.add_argument('--batch-size', type=int, default=1, help='batch size') parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') parser.add_argument('--conf-thres', type=float, default=0.01, help='confidence threshold') parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold') parser.add_argument('--max-det', type=int, default=300, help='maximum detections per image') parser.add_argument('--task', default='val', help='train, val, test, speed or study') parser.add_argument('--device', default='6', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset') parser.add_argument('--augment', action='store_true', help='augmented inference') parser.add_argument('--verbose', action='store_true', help='report mAP by class') parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt') parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file') parser.add_argument('--project', default=ROOT / 'runs_2/val_test', help='save results to project/name') parser.add_argument('--name', default='base', help='save to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') opt = parser.parse_args()
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Validate a trained YOLOv5 segment model on a segment dataset Usage: $ bash data/scripts/get_coco.sh --val --segments # download COCO-segments val split (1G, 5000 images) $ python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate COCO-segments Usage - formats: $ python segment/val.py --weights yolov5s-seg.pt # PyTorch yolov5s-seg.torchscript # TorchScript yolov5s-seg.onnx # ONNX Runtime or OpenCV DNN with --dnn yolov5s-seg_openvino_label # OpenVINO yolov5s-seg.engine # TensorRT yolov5s-seg.mlmodel # CoreML (macOS-only) yolov5s-seg_saved_model # TensorFlow SavedModel yolov5s-seg.pb # TensorFlow GraphDef yolov5s-seg.tflite # TensorFlow Lite yolov5s-seg_edgetpu.tflite # TensorFlow Edge TPU yolov5s-seg_paddle_model # PaddlePaddle """ FILE = Path(__file__).resolve() ROOT = FILE.parents[1] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative def save_one_txt(predn, save_conf, shape, file): # Save one txt result gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh for *xyxy, conf, cls in predn.tolist(): xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(file, 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') def save_one_json(predn, jdict, path, class_map, pred_masks): # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236} def single_encode(x): rle = encode(np.asarray(x[:, :, None], order="F", dtype="uint8"))[0] rle["counts"] = rle["counts"].decode("utf-8") return rle image_id = int(path.stem) if path.stem.isnumeric() else path.stem box = xyxy2xywh(predn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner pred_masks = np.transpose(pred_masks, (2, 0, 1)) with ThreadPool(NUM_THREADS) as pool: rles = pool.map(single_encode, pred_masks) for i, (p, b) in enumerate(zip(predn.tolist(), box.tolist())): jdict.append({ 'image_id': image_id, 'category_id': class_map[int(p[5])], 'bbox': [round(x, 3) for x in b], 'score': round(p[4], 5), 'segmentation': rles[i]}) def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, overlap=False, masks=False): """ Return correct prediction matrix Arguments: detections (array[N, 6]), x1, y1, x2, y2, conf, class labels (array[M, 5]), class, x1, y1, x2, y2 Returns: correct (array[N, 10]), for 10 IoU levels """ if masks: if overlap: nl = len(labels) index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1 gt_masks = gt_masks.repeat(nl, 1, 1) # shape(1,640,640) -> (n,640,640) gt_masks = torch.where(gt_masks == index, 1.0, 0.0) if gt_masks.shape[1:] != pred_masks.shape[1:]: gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode="bilinear", align_corners=False)[0] gt_masks = gt_masks.gt_(0.5) iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1)) else: # boxes iou = box_iou(labels[:, 1:], detections[:, :4]) correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool) correct_class = labels[:, 0:1] == detections[:, 5] for i in range(len(iouv)): x = torch.where((iou >= iouv[i]) & correct_class) # IoU > threshold and classes match if x[0].shape[0]: matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detect, iou] if x[0].shape[0] > 1: matches = 
matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 1], return_index=True)[1]] # matches = matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 0], return_index=True)[1]] correct[matches[:, 1].astype(int), i] = True return torch.tensor(correct, dtype=torch.bool, device=iouv.device) @smart_inference_mode() def run( data, weights=None, # model.pt path(s) batch_size=32, # batch size imgsz=640, # inference size (pixels) conf_thres=0.001, # confidence threshold iou_thres=0.6, # NMS IoU threshold max_det=300, # maximum detections per image task='val', # train, val, test, speed or study device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu workers=8, # max dataloader workers (per RANK in DDP mode) single_cls=False, # treat as single-class dataset augment=False, # augmented inference verbose=False, # verbose output save_txt=False, # save results to *.txt save_hybrid=False, # save label+prediction hybrid results to *.txt save_conf=False, # save confidences in --save-txt labels save_json=False, # save a COCO-JSON results file project=ROOT / 'runs/val-seg', # save to project/name name='exp', # save to project/name exist_ok=False, # existing project/name ok, do not increment half=True, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference model=None, dataloader=None, save_dir=Path(''), plots=True, overlap=False, mask_downsample_ratio=1, compute_loss=None, callbacks=Callbacks(), ): if save_json: check_requirements(['pycocotools']) process = process_mask_upsample # more accurate else: process = process_mask # faster # Initialize/load model and set device training = model is not None if training: # called by train.py device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model half &= device.type != 'cpu' # half precision only supported on CUDA model.half() if half else model.float() nm = de_parallel(model).model[-1].nm # number of masks else: # called directly device = select_device(device, batch_size=batch_size) # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size half = model.fp16 # FP16 supported on limited backends with CUDA nm = de_parallel(model).model.model[-1].nm if isinstance(model, SegmentationModel) else 32 # number of masks if engine: batch_size = model.batch_size else: device = model.device if not (pt or jit): batch_size = 1 # export.py models default to batch-size 1 LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') # Data data = check_dataset(data) # check # Configure model.eval() cuda = device.type != 'cpu' is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt') # COCO dataset nc = 1 if single_cls else int(data['nc']) # number of classes iouv = torch.linspace(0.5, 0.95, 10, device=device) # iou vector for mAP@0.5:0.95 niou = iouv.numel() # Dataloader if not training: if pt and not single_cls: # check --weights are trained on --data ncm = model.model.nc assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \ f'classes). 
Pass correct combination of --weights and --data that are trained together.' model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup pad, rect = (0.0, False) if task == 'speed' else (0.5, pt) # square inference for benchmarks task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images dataloader = create_dataloader(data[task], imgsz, batch_size, stride, single_cls, pad=pad, rect=rect, workers=workers, prefix=colorstr(f'{task}: '), overlap_mask=overlap, mask_downsample_ratio=mask_downsample_ratio)[0] seen = 0 confusion_matrix = ConfusionMatrix(nc=nc) names = model.names if hasattr(model, 'names') else model.module.names # get class names if isinstance(names, (list, tuple)): # old format names = dict(enumerate(names)) class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', "R", "mAP50", "mAP50-95)", "Mask(P", "R", "mAP50", "mAP50-95)") dt = Profile(), Profile(), Profile() metrics = Metrics() loss = torch.zeros(4, device=device) jdict, stats = [], [] # callbacks.run('on_val_start') pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT) # progress bar for batch_i, (im, targets, paths, shapes, masks) in enumerate(pbar): # callbacks.run('on_val_batch_start') with dt[0]: if cuda: im = im.to(device, non_blocking=True) targets = targets.to(device) masks = masks.to(device) masks = masks.float() im = im.half() if half else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 nb, _, height, width = im.shape # batch size, channels, height, width # Inference with dt[1]: act = time.time() preds, protos, train_out = model(im) if compute_loss else (*model(im, augment=augment)[:2], None) #print('time.time():',time.time()-act) # Loss if compute_loss: loss += compute_loss((train_out, protos), targets, masks)[1] # box, obj, cls # NMS targets[:, 2:] *= torch.tensor((width, height, width, height), device=device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling with dt[2]: preds = non_max_suppression(preds, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls, max_det=max_det, nm=nm) # Metrics plot_masks = [] # masks for plotting for si, (pred, proto) in enumerate(zip(preds, protos)): labels = targets[targets[:, 0] == si, 1:] nl, npr = labels.shape[0], pred.shape[0] # number of labels, predictions path, shape = Path(paths[si]), shapes[si][0] correct_masks = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init correct_bboxes = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init seen += 1 if npr == 0: if nl: stats.append((correct_masks, correct_bboxes, *torch.zeros((2, 0), device=device), labels[:, 0])) if plots: confusion_matrix.process_batch(detections=None, labels=labels[:, 0]) continue # Masks midx = [si] if overlap else targets[:, 0] == si gt_masks = masks[midx] pred_masks = process(proto, pred[:, 6:], pred[:, :4], shape=im[si].shape[1:]) # Predictions if single_cls: pred[:, 5] = 0 predn = pred.clone() scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred # Evaluate if nl: tbox = xywh2xyxy(labels[:, 1:5]) # target boxes scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels correct_bboxes = process_batch(predn, labelsn, iouv) correct_masks = process_batch(predn, labelsn, iouv, pred_masks, gt_masks, overlap=overlap, masks=True) if plots: 
confusion_matrix.process_batch(predn, labelsn) stats.append((correct_masks, correct_bboxes, pred[:, 4], pred[:, 5], labels[:, 0])) # (conf, pcls, tcls) pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8) if plots and batch_i < 3: plot_masks.append(pred_masks[:15].cpu()) # filter top 15 to plot # Save/log if save_txt: save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt') if save_json: pred_masks = scale_image(im[si].shape[1:], pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(), shape, shapes[si][1]) save_one_json(predn, jdict, path, class_map, pred_masks) # append to COCO-JSON dictionary # callbacks.run('on_val_image_end', pred, predn, path, names, im[si]) # Plot images if plots and batch_i < 3: if len(plot_masks): plot_masks = torch.cat(plot_masks, dim=0) plot_images_and_masks(im, targets, masks, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names) plot_images_and_masks(im, output_to_target(preds, max_det=15), plot_masks, paths, save_dir / f'val_batch{batch_i}_pred.jpg', names) # pred # callbacks.run('on_val_batch_end') # Compute metrics stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)] # to numpy if len(stats) and stats[0].any(): results = ap_per_class_box_and_mask(*stats, plot=plots, save_dir=save_dir, names=names) metrics.update(results) nt = np.bincount(stats[4].astype(int), minlength=nc) # number of targets per class # Print results pf = '%22s' + '%11i' * 2 + '%11.3g' * 8 # print format LOGGER.info(pf % ("all", seen, nt.sum(), *metrics.mean_results())) if nt.sum() == 0: LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels') # Print results per class if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats): for i, c in enumerate(metrics.ap_class_index): LOGGER.info(pf % (names[c], seen, nt[c], *metrics.class_result(i))) # Print speeds t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image if not training: shape = (batch_size, 3, imgsz, imgsz) LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t) # Plots if plots: confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) # callbacks.run('on_val_end') mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask = metrics.mean_results() # Save JSON if save_json and len(jdict): w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json pred_json = str(save_dir / f"{w}_predictions.json") # predictions json LOGGER.info(f'\nEvaluating pycocotools mAP... 
saving {pred_json}...') with open(pred_json, 'w') as f: json.dump(jdict, f) try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb anno = COCO(anno_json) # init annotations api pred = anno.loadRes(pred_json) # init predictions api results = [] for eval in COCOeval(anno, pred, 'bbox'), COCOeval(anno, pred, 'segm'): if is_coco: eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files] # img ID to evaluate eval.evaluate() eval.accumulate() eval.summarize() results.extend(eval.stats[:2]) # update results (mAP@0.5:0.95, mAP@0.5) map_bbox, map50_bbox, map_mask, map50_mask = results except Exception as e: LOGGER.info(f'pycocotools unable to run: {e}') # Return results model.float() # for training if not training: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") final_metric = mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask return (*final_metric, *(loss.cpu() / len(dataloader)).tolist()), metrics.get_maps(nc), t def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--data', type=str, default=ROOT / 'data/BCC.yaml', help='dataset.yaml path') parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'runs_2/train-seg/base/weights/best.pt', help='model path(s)') parser.add_argument('--batch-size', type=int, default=1, help='batch size') parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') parser.add_argument('--conf-thres', type=float, default=0.01, help='confidence threshold') parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold') parser.add_argument('--max-det', type=int, default=300, help='maximum detections per image') parser.add_argument('--task', default='val', help='train, val, test, speed or study') parser.add_argument('--device', default='6', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset') parser.add_argument('--augment', action='store_true', help='augmented inference') parser.add_argument('--verbose', action='store_true', help='report mAP by class') parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt') parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file') parser.add_argument('--project', default=ROOT / 'runs_2/val_test', help='save results to project/name') parser.add_argument('--name', default='base', help='save to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') opt = parser.parse_args()
opt.data = check_yaml(opt.data) # check YAML
10
2023-12-10 14:18:29+00:00
24k
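The `process_batch` function in the record above marks a prediction correct at a given IoU threshold by collecting all (label, detection) pairs whose IoU clears the threshold, sorting them by IoU, and deduplicating so each detection and each label is matched at most once. A minimal sketch of that matching step for a single threshold (standalone toy, not the repo's exact helper; `iou` is assumed to be an M-labels-by-N-detections matrix with class agreement already enforced):

import numpy as np
import torch

def match_at_threshold(iou: torch.Tensor, thr: float) -> np.ndarray:
    correct = np.zeros(iou.shape[1], dtype=bool)
    li, di = torch.where(iou >= thr)  # candidate (label, detection) pairs
    if li.numel():
        m = torch.stack((li.float(), di.float(), iou[li, di]), 1).cpu().numpy()
        m = m[m[:, 2].argsort()[::-1]]                   # highest IoU first
        m = m[np.unique(m[:, 1], return_index=True)[1]]  # one match per detection
        m = m[np.unique(m[:, 0], return_index=True)[1]]  # one match per label
        correct[m[:, 1].astype(int)] = True
    return correct

iou = torch.tensor([[0.9, 0.2], [0.6, 0.7]])  # 2 labels x 2 detections
print(match_at_threshold(iou, 0.5))           # [ True  True ]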
youngskkim/CRN
exps/base_exp.py
[ { "identifier": "NuscDatasetRadarDet", "path": "datasets/nusc_det_dataset.py", "snippet": "class NuscDatasetRadarDet(Dataset):\n def __init__(self,\n ida_aug_conf,\n bda_aug_conf,\n rda_aug_conf,\n classes,\n data_root,\n...
from functools import partial from pytorch_lightning.core import LightningModule from torch.cuda.amp.autocast_mode import autocast from torch.optim.lr_scheduler import MultiStepLR from mmcv.runner import build_optimizer from datasets.nusc_det_dataset import NuscDatasetRadarDet, collate_fn from evaluators.det_evaluators import DetNuscEvaluator from models.base_bev_depth import BaseBEVDepth from utils.torch_dist import all_gather_object, synchronize import mmcv import torch import torch.nn.functional as F import torch.nn.parallel import torch.utils.data import torch.utils.data.distributed import torchvision.models as models
16,127
Output: gt_depths: [B*N*h*w, d] """ B, N, H, W = gt_depths.shape gt_depths = gt_depths.view( B * N, H // self.downsample_factor, self.downsample_factor, W // self.downsample_factor, self.downsample_factor, 1, ) gt_depths = gt_depths.permute(0, 1, 3, 5, 2, 4).contiguous() gt_depths = gt_depths.view( -1, self.downsample_factor * self.downsample_factor) gt_depths_tmp = torch.where(gt_depths == 0.0, 1e5 * torch.ones_like(gt_depths), gt_depths) gt_depths = torch.min(gt_depths_tmp, dim=-1).values gt_depths = gt_depths.view(B * N, H // self.downsample_factor, W // self.downsample_factor) gt_depths = (gt_depths - (self.dbound[0] - self.dbound[2])) / self.dbound[2] gt_depths = torch.where( (gt_depths < self.depth_channels + 1) & (gt_depths > 0.), gt_depths, torch.zeros_like(gt_depths)) gt_depths = F.one_hot(gt_depths.long(), num_classes=self.depth_channels + 1).view( -1, self.depth_channels + 1)[:, 1:] return gt_depths.float() def eval_step(self, batch, batch_idx, prefix: str): (sweep_imgs, mats, img_metas, _, _, _, _, pts_pv) = batch if torch.cuda.is_available(): if self.return_image: sweep_imgs = sweep_imgs.cuda() for key, value in mats.items(): mats[key] = value.cuda() if self.return_radar_pv: pts_pv = pts_pv.cuda() preds = self(sweep_imgs, mats, pts_pv=pts_pv, is_train=False) if isinstance(self.model, torch.nn.parallel.DistributedDataParallel): results = self.model.module.get_bboxes(preds, img_metas) else: results = self.model.get_bboxes(preds, img_metas) for i in range(len(results)): results[i][0] = results[i][0].tensor.detach().cpu().numpy() results[i][1] = results[i][1].detach().cpu().numpy() results[i][2] = results[i][2].detach().cpu().numpy() results[i].append(img_metas[i]) return results def validation_epoch_end(self, validation_step_outputs): detection_losses = list() heatmap_losses = list() bbox_losses = list() depth_losses = list() for validation_step_output in validation_step_outputs: detection_losses.append(validation_step_output[0]) heatmap_losses.append(validation_step_output[1]) bbox_losses.append(validation_step_output[2]) depth_losses.append(validation_step_output[3]) synchronize() self.log('val/detection', torch.mean(torch.stack(detection_losses)), on_epoch=True) self.log('val/heatmap', torch.mean(torch.stack(heatmap_losses)), on_epoch=True) self.log('val/bbox', torch.mean(torch.stack(bbox_losses)), on_epoch=True) self.log('val/depth', torch.mean(torch.stack(depth_losses)), on_epoch=True) def validation_step(self, batch, batch_idx): (sweep_imgs, mats, _, gt_boxes_3d, gt_labels_3d, _, depth_labels, pts_pv) = batch if torch.cuda.is_available(): if self.return_image: sweep_imgs = sweep_imgs.cuda() for key, value in mats.items(): mats[key] = value.cuda() if self.return_radar_pv: pts_pv = pts_pv.cuda() gt_boxes_3d = [gt_box.cuda() for gt_box in gt_boxes_3d] gt_labels_3d = [gt_label.cuda() for gt_label in gt_labels_3d] with torch.no_grad(): preds, depth_preds = self(sweep_imgs, mats, pts_pv=pts_pv, is_train=True) targets = self.model.get_targets(gt_boxes_3d, gt_labels_3d) loss_detection, loss_heatmap, loss_bbox = self.model.loss(targets, preds) if len(depth_labels.shape) == 5: # only key-frame will calculate depth loss depth_labels = depth_labels[:, 0, ...].contiguous() loss_depth = self.get_depth_loss(depth_labels.cuda(), depth_preds, weight=3.) 
return loss_detection, loss_heatmap, loss_bbox, loss_depth def test_epoch_end(self, test_step_outputs): all_pred_results = list() all_img_metas = list() for test_step_output in test_step_outputs: for i in range(len(test_step_output)): all_pred_results.append(test_step_output[i][:3]) all_img_metas.append(test_step_output[i][3]) synchronize() # TODO: Change another way. dataset_length = len(self.val_dataloader().dataset) all_pred_results = sum( map(list, zip(*all_gather_object(all_pred_results))), [])[:dataset_length] all_img_metas = sum(map(list, zip(*all_gather_object(all_img_metas))), [])[:dataset_length] if self.global_rank == 0: self.evaluator.evaluate(all_pred_results, all_img_metas) def configure_optimizers(self): optimizer = build_optimizer(self.model, self.optimizer_config) scheduler = MultiStepLR(optimizer, [19, 23]) return [[optimizer], [scheduler]] def train_dataloader(self):
# Copyright (c) Megvii Inc. All rights reserved. pretrain_config = dict( img_model_path=None, img_load_key=[], img_freeze_key=None, pts_model_path=None, pts_load_key=[]) optimizer_config = dict( type='AdamW', lr=2e-4, weight_decay=1e-2) H = 900 W = 1600 final_dim = (256, 704) img_conf = dict(img_mean=[123.675, 116.28, 103.53], img_std=[58.395, 57.12, 57.375], to_rgb=True) ida_aug_conf = { 'resize_lim': (0.386, 0.55), 'final_dim': final_dim, 'rot_lim': (-5.4, 5.4), 'H': 900, 'W': 1600, 'rand_flip': True, 'bot_pct_lim': (0.0, 0.0), 'cams': ['CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_BACK_LEFT', 'CAM_BACK', 'CAM_BACK_RIGHT'], 'Ncams': 6, } bda_aug_conf = { 'rot_ratio': 1.0, 'rot_lim': (-22.5, 22.5), 'scale_lim': (0.95, 1.05), 'flip_dx_ratio': 0.5, 'flip_dy_ratio': 0.5 } rda_aug_conf = { 'N_sweeps': 6, 'N_use': 5, 'drop_ratio': 0.1, } backbone_img_conf = { 'x_bound': [-51.2, 51.2, 0.8], 'y_bound': [-51.2, 51.2, 0.8], 'z_bound': [-5, 3, 8], 'd_bound': [2.0, 58.0, 0.8], 'final_dim': final_dim, 'output_channels': 80, 'downsample_factor': 16, 'img_backbone_conf': dict( type='ResNet', depth=50, frozen_stages=0, out_indices=[0, 1, 2, 3], norm_eval=False, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), ), 'img_neck_conf': dict( type='SECONDFPN', in_channels=[256, 512, 1024, 2048], upsample_strides=[0.25, 0.5, 1, 2], out_channels=[128, 128, 128, 128], ), 'depth_net_conf': dict(in_channels=512, mid_channels=512), 'camera_aware': True } CLASSES = [ 'car', 'truck', 'construction_vehicle', 'bus', 'trailer', 'barrier', 'motorcycle', 'bicycle', 'pedestrian', 'traffic_cone', ] head_conf = { 'bev_backbone_conf': dict( type='ResNet', in_channels=80, depth=18, num_stages=3, strides=(1, 2, 2), dilations=(1, 1, 1), out_indices=[0, 1, 2], norm_eval=False, base_channels=160), 'bev_neck_conf': dict( type='SECONDFPN', in_channels=[80, 160, 320, 640], upsample_strides=[1, 2, 4, 8], out_channels=[64, 64, 64, 64]), 'tasks': [ dict(num_class=1, class_names=['car']), dict(num_class=2, class_names=['truck', 'construction_vehicle']), dict(num_class=2, class_names=['bus', 'trailer']), dict(num_class=1, class_names=['barrier']), dict(num_class=2, class_names=['motorcycle', 'bicycle']), dict(num_class=2, class_names=['pedestrian', 'traffic_cone']),], 'common_heads': dict( reg=(2, 2), height=(1, 2), dim=(3, 2), rot=(2, 2), vel=(2, 2)), 'bbox_coder': dict( type='CenterPointBBoxCoder', post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], max_num=500, score_threshold=0.1, out_size_factor=4, voxel_size=[0.2, 0.2, 8], pc_range=[-51.2, -51.2, -5, 51.2, 51.2, 3], code_size=9), 'train_cfg': dict( point_cloud_range=[-51.2, -51.2, -5, 51.2, 51.2, 3], grid_size=[512, 512, 1], voxel_size=[0.2, 0.2, 8], out_size_factor=4, dense_reg=1, gaussian_overlap=0.1, max_objs=500, min_radius=2, code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5, 0.5]), 'test_cfg': dict( post_center_limit_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0], max_per_img=500, max_pool_nms=False, min_radius=[4, 12, 10, 1, 0.85, 0.175], score_threshold=0.1, out_size_factor=4, voxel_size=[0.2, 0.2, 8], nms_type='circle', pre_max_size=1000, post_max_size=83, nms_thr=0.2), 'in_channels': 256, # Equal to bev_neck output_channels. 
'loss_cls': dict(type='GaussianFocalLoss', reduction='mean'), 'loss_bbox': dict(type='L1Loss', reduction='mean', loss_weight=0.25), 'gaussian_overlap': 0.1, 'min_radius': 2, } class BEVDepthLightningModel(LightningModule): MODEL_NAMES = sorted(name for name in models.__dict__ if name.islower() and not name.startswith('__') and callable(models.__dict__[name])) def __init__(self, gpus: int = 1, data_root='data/nuScenes', eval_interval=1, batch_size_per_device=8, class_names=CLASSES, backbone_img_conf=backbone_img_conf, head_conf=head_conf, ida_aug_conf=ida_aug_conf, bda_aug_conf=bda_aug_conf, rda_aug_conf=rda_aug_conf, default_root_dir='./outputs/', **kwargs): super().__init__() self.save_hyperparameters() self.gpus = gpus self.optimizer_config = optimizer_config self.pretrain_config = pretrain_config self.eval_interval = eval_interval self.batch_size_per_device = batch_size_per_device self.data_root = data_root self.class_names = class_names self.backbone_img_conf = backbone_img_conf self.head_conf = head_conf self.ida_aug_conf = ida_aug_conf self.bda_aug_conf = bda_aug_conf self.rda_aug_conf = rda_aug_conf mmcv.mkdir_or_exist(default_root_dir) self.default_root_dir = default_root_dir self.evaluator = DetNuscEvaluator(class_names=self.class_names, output_dir=self.default_root_dir) self.model = BaseBEVDepth(self.backbone_img_conf, self.head_conf) self.mode = 'valid' self.img_conf = img_conf self.data_use_cbgs = False self.load_interval = 1 self.num_sweeps = 1 self.sweep_idxes = list() self.key_idxes = list() self.data_return_depth = True self.downsample_factor = self.backbone_img_conf['downsample_factor'] self.dbound = self.backbone_img_conf['d_bound'] self.depth_channels = int( (self.dbound[1] - self.dbound[0]) / self.dbound[2]) self.use_fusion = False self.train_info_paths = 'data/nuScenes/nuscenes_infos_train.pkl' self.val_info_paths = 'data/nuScenes/nuscenes_infos_val.pkl' self.predict_info_paths = 'data/nuScenes/nuscenes_infos_test.pkl' self.return_image = True self.return_depth = True self.return_radar_pv = False self.remove_z_axis = True def forward(self, sweep_imgs, mats, is_train=False, **inputs): return self.model(sweep_imgs, mats, is_train=is_train) def training_step(self, batch): if self.global_rank == 0: for pg in self.trainer.optimizers[0].param_groups: self.log('learning_rate', pg["lr"]) (sweep_imgs, mats, _, gt_boxes_3d, gt_labels_3d, _, depth_labels, pts_pv) = batch if torch.cuda.is_available(): if self.return_image: sweep_imgs = sweep_imgs.cuda() for key, value in mats.items(): mats[key] = value.cuda() if self.return_radar_pv: pts_pv = pts_pv.cuda() gt_boxes_3d = [gt_box.cuda() for gt_box in gt_boxes_3d] gt_labels_3d = [gt_label.cuda() for gt_label in gt_labels_3d] preds, depth_preds = self(sweep_imgs, mats, pts_pv=pts_pv, is_train=True) targets = self.model.get_targets(gt_boxes_3d, gt_labels_3d) loss_detection, loss_heatmap, loss_bbox = self.model.loss(targets, preds) if len(depth_labels.shape) == 5: # only key-frame will calculate depth loss depth_labels = depth_labels[:, 0, ...].contiguous() loss_depth = self.get_depth_loss(depth_labels.cuda(), depth_preds) self.log('train/detection', loss_detection) self.log('train/heatmap', loss_heatmap) self.log('train/bbox', loss_bbox) self.log('train/depth', loss_depth) return loss_detection + loss_depth def get_depth_loss(self, depth_labels, depth_preds, weight=3.): depth_labels = self.get_downsampled_gt_depth(depth_labels) depth_preds = depth_preds.permute(0, 2, 3, 1).contiguous().view( -1, self.depth_channels) fg_mask = 
torch.max(depth_labels, dim=1).values > 0.0 with autocast(enabled=False): loss_depth = (F.binary_cross_entropy( depth_preds[fg_mask], depth_labels[fg_mask], reduction='none', ).sum() / max(1.0, fg_mask.sum())) return weight * loss_depth def get_downsampled_gt_depth(self, gt_depths): """ Input: gt_depths: [B, N, H, W] Output: gt_depths: [B*N*h*w, d] """ B, N, H, W = gt_depths.shape gt_depths = gt_depths.view( B * N, H // self.downsample_factor, self.downsample_factor, W // self.downsample_factor, self.downsample_factor, 1, ) gt_depths = gt_depths.permute(0, 1, 3, 5, 2, 4).contiguous() gt_depths = gt_depths.view( -1, self.downsample_factor * self.downsample_factor) gt_depths_tmp = torch.where(gt_depths == 0.0, 1e5 * torch.ones_like(gt_depths), gt_depths) gt_depths = torch.min(gt_depths_tmp, dim=-1).values gt_depths = gt_depths.view(B * N, H // self.downsample_factor, W // self.downsample_factor) gt_depths = (gt_depths - (self.dbound[0] - self.dbound[2])) / self.dbound[2] gt_depths = torch.where( (gt_depths < self.depth_channels + 1) & (gt_depths > 0.), gt_depths, torch.zeros_like(gt_depths)) gt_depths = F.one_hot(gt_depths.long(), num_classes=self.depth_channels + 1).view( -1, self.depth_channels + 1)[:, 1:] return gt_depths.float() def eval_step(self, batch, batch_idx, prefix: str): (sweep_imgs, mats, img_metas, _, _, _, _, pts_pv) = batch if torch.cuda.is_available(): if self.return_image: sweep_imgs = sweep_imgs.cuda() for key, value in mats.items(): mats[key] = value.cuda() if self.return_radar_pv: pts_pv = pts_pv.cuda() preds = self(sweep_imgs, mats, pts_pv=pts_pv, is_train=False) if isinstance(self.model, torch.nn.parallel.DistributedDataParallel): results = self.model.module.get_bboxes(preds, img_metas) else: results = self.model.get_bboxes(preds, img_metas) for i in range(len(results)): results[i][0] = results[i][0].tensor.detach().cpu().numpy() results[i][1] = results[i][1].detach().cpu().numpy() results[i][2] = results[i][2].detach().cpu().numpy() results[i].append(img_metas[i]) return results def validation_epoch_end(self, validation_step_outputs): detection_losses = list() heatmap_losses = list() bbox_losses = list() depth_losses = list() for validation_step_output in validation_step_outputs: detection_losses.append(validation_step_output[0]) heatmap_losses.append(validation_step_output[1]) bbox_losses.append(validation_step_output[2]) depth_losses.append(validation_step_output[3]) synchronize() self.log('val/detection', torch.mean(torch.stack(detection_losses)), on_epoch=True) self.log('val/heatmap', torch.mean(torch.stack(heatmap_losses)), on_epoch=True) self.log('val/bbox', torch.mean(torch.stack(bbox_losses)), on_epoch=True) self.log('val/depth', torch.mean(torch.stack(depth_losses)), on_epoch=True) def validation_step(self, batch, batch_idx): (sweep_imgs, mats, _, gt_boxes_3d, gt_labels_3d, _, depth_labels, pts_pv) = batch if torch.cuda.is_available(): if self.return_image: sweep_imgs = sweep_imgs.cuda() for key, value in mats.items(): mats[key] = value.cuda() if self.return_radar_pv: pts_pv = pts_pv.cuda() gt_boxes_3d = [gt_box.cuda() for gt_box in gt_boxes_3d] gt_labels_3d = [gt_label.cuda() for gt_label in gt_labels_3d] with torch.no_grad(): preds, depth_preds = self(sweep_imgs, mats, pts_pv=pts_pv, is_train=True) targets = self.model.get_targets(gt_boxes_3d, gt_labels_3d) loss_detection, loss_heatmap, loss_bbox = self.model.loss(targets, preds) if len(depth_labels.shape) == 5: # only key-frame will calculate depth loss depth_labels = depth_labels[:, 0, ...].contiguous() 
loss_depth = self.get_depth_loss(depth_labels.cuda(), depth_preds, weight=3.) return loss_detection, loss_heatmap, loss_bbox, loss_depth def test_epoch_end(self, test_step_outputs): all_pred_results = list() all_img_metas = list() for test_step_output in test_step_outputs: for i in range(len(test_step_output)): all_pred_results.append(test_step_output[i][:3]) all_img_metas.append(test_step_output[i][3]) synchronize() # TODO: Change another way. dataset_length = len(self.val_dataloader().dataset) all_pred_results = sum( map(list, zip(*all_gather_object(all_pred_results))), [])[:dataset_length] all_img_metas = sum(map(list, zip(*all_gather_object(all_img_metas))), [])[:dataset_length] if self.global_rank == 0: self.evaluator.evaluate(all_pred_results, all_img_metas) def configure_optimizers(self): optimizer = build_optimizer(self.model, self.optimizer_config) scheduler = MultiStepLR(optimizer, [19, 23]) return [[optimizer], [scheduler]] def train_dataloader(self):
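The depth supervision above is easiest to verify in isolation. Below is a minimal, self-contained sketch of the same min-pool-then-one-hot binning that get_downsampled_gt_depth performs; dbound, downsample_factor and the toy resolution are made-up stand-ins for values the real detector takes from its config.

import torch
import torch.nn.functional as F

# Hypothetical depth bounds (min, max, step) and downsample factor; the real
# values come from the detector config, not from this sketch.
dbound = (2.0, 58.0, 0.5)
depth_channels = int((dbound[1] - dbound[0]) / dbound[2])  # 112 bins
downsample_factor = 16

B, N, H, W = 1, 1, 32, 32  # toy resolution, divisible by downsample_factor
gt_depths = torch.rand(B, N, H, W) * 60.0
gt_depths[gt_depths < 2.0] = 0.0  # zero marks pixels without a depth return

# Min-pool every downsample_factor x downsample_factor patch, ignoring zeros.
x = gt_depths.view(B * N, H // downsample_factor, downsample_factor,
                   W // downsample_factor, downsample_factor, 1)
x = x.permute(0, 1, 3, 5, 2, 4).contiguous()
x = x.view(-1, downsample_factor * downsample_factor)
x = torch.where(x == 0.0, 1e5 * torch.ones_like(x), x)
x = torch.min(x, dim=-1).values

# Map metric depth to a 1-based bin index; bin 0 means "no valid depth".
bins = (x - (dbound[0] - dbound[2])) / dbound[2]
bins = torch.where((bins < depth_channels + 1) & (bins > 0.0),
                   bins, torch.zeros_like(bins))

# One-hot over depth_channels + 1 classes, then drop the invalid class 0.
onehot = F.one_hot(bins.long(), num_classes=depth_channels + 1)[:, 1:]
print(onehot.shape)  # torch.Size([4, 112]) for the toy sizes above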
train_dataset = NuscDatasetRadarDet(
0
2023-12-06 14:57:49+00:00
24k
jinxixiang/magic_animate_unofficial
animatediff/magic_animate/pipeline.py
[ { "identifier": "UNet3DConditionModel", "path": "animatediff/magic_animate/unet_controlnet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optiona...
import inspect, math import numpy as np import torch import torch.distributed as dist import einops from typing import Callable, List, Optional, Union from dataclasses import dataclass from PIL import Image from tqdm import tqdm from diffusers.utils import is_accelerate_available from packaging import version from transformers import CLIPTextModel, CLIPTokenizer from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL from diffusers.pipeline_utils import DiffusionPipeline from diffusers.schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from diffusers.utils import deprecate, logging, BaseOutput from einops import rearrange from animatediff.magic_animate.unet_controlnet import UNet3DConditionModel from animatediff.magic_animate.controlnet import ControlNetModel from animatediff.magic_animate.mutual_self_attention import ReferenceAttentionControl from animatediff.magic_animate.context import ( get_context_scheduler, get_total_steps ) from animatediff.utils.util import get_tensor_interpolation_method from accelerate import cpu_offload
17,204
# ************************************************************************* # This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo- # difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B- # ytedance Inc.. # ************************************************************************* # Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TODO: 1. support multi-controlnet 2. [DONE] support DDIM inversion 3. support Prompt-to-prompt """ logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class AnimationPipeline(DiffusionPipeline): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel,
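The crop above stops at the constructor signature, so the pipeline's video handling is not visible here. As a small illustration of the einops pattern such video-diffusion pipelines rely on (and which the rearrange import hints at), the sketch below folds the frame axis into the batch so a 2D module can process a video tensor frame by frame; the shapes and the VAE stand-in are toy assumptions, not the repository's actual decode path.

import torch
from einops import rearrange

# Toy latent video: (batch, channels, frames, height, width).
latents = torch.randn(2, 4, 16, 32, 32)

# Fold frames into the batch so a 2D VAE/UNet can run per frame...
flat = rearrange(latents, "b c f h w -> (b f) c h w")
decoded = flat * 2.0  # stand-in for something like vae.decode(flat).sample

# ...then restore the video layout.
video = rearrange(decoded, "(b f) c h w -> b c f h w", f=latents.shape[2])
print(video.shape)  # torch.Size([2, 4, 16, 32, 32])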
controlnet: ControlNetModel,
1
2023-12-12 00:16:39+00:00
24k
qitan/devops-backend-lite
common/ext_fun.py
[ { "identifier": "generate_docu", "path": "common/utils/ElasticSearchAPI.py", "snippet": "def generate_docu(table, index_version=None):\n index_name = f\"{table.name}-{index_version}\" if index_version else table.name\n _tbindex = Index(index_name)\n _tbindex.analyzer(my_normalizer)\n _tbinde...
from gitlab.exceptions import GitlabGetError from functools import reduce from common.utils.ElasticSearchAPI import generate_docu, Search from common.utils.GitLabAPI import GitLabAPI from common.utils.HarborAPI import HarborAPI from common.utils.JenkinsAPI import GlueJenkins from common.custom_format import convert_xml_to_str_with_pipeline from common.variables import DASHBOARD_TIME_FORMAT, DASHBOARD_TIME_FORMAT_T, DASHBOARD_TIME_FREQNAMES, \ DASHBOARD_TIME_FREQNAMES_T, SENSITIVE_KEYS, JENKINS_CALLBACK_KEY, \ JENKINS_STATUS_MAP, DEV_LANGUAGE_KEY from dbapp.models import AppInfo, Product, KubernetesCluster, KubernetesDeploy, MicroApp, Project, ProjectConfig, DevLanguage, BuildJob, UserProfile, SystemConfig, Role, Permission, Menu, DataDict from django.conf import settings from django.core.cache import cache from django.utils import timezone from django.db.models import Q from social_django.utils import load_strategy from rest_framework.utils.serializer_helpers import ReturnDict from config import SOCIAL_AUTH_GITLAB_API_URL, GITLAB_ADMIN_TOKEN from common.utils.K8sAPI import K8sAPI from urllib.parse import urlparse, quote_plus from dateutil.relativedelta import relativedelta from dateutil.rrule import rrule from ruamel import yaml from datetime import datetime, timedelta from celery import current_app import copy import operator import re import time import pytz import os import json import requests import math import shortuuid import logging
17,589
'branch': 'master',
                                      'content': content,
                                      'author_email': user.email,
                                      'author_name': user.username,
                                      'commit_message': f'Create {instance.name} {filename} by {user.username}'})
        content = project.files.raw(
            f"{instance.name}/{filename}", ref='master')
        return True, content
    except GitlabGetError as e:
        logger.info(f'Fetch failed: {e}')
        if e.response_code == 404:
            logger.info(f'File {instance.name}/{filename} does not exist.')
            return True, ''
    except BaseException as e:
        logger.error(f'GitLab language template error, reason: {e}')
        return False, f'GitLab language template error, reason: {e}'


def snake_case(x):
    """
    Convert CamelCase to snake_case.
    """
    term_exclude = ['OS', 'GPU', 'DB', 'IA', 'IP', 'RR', 'TTL',
                    'SLB', 'CPU', 'MEMORY', 'QPS']
    for i in term_exclude:
        x = x.replace(i, i.lower())
    return re.sub(r'(?P<key>[A-Z])', r'_\g<key>', x).lower().strip('_')


def node_filter(node_id, data):
    """
    Find a node in a tree.

    :params: node_id int  the node ID
    :params: data list  the list of nodes
    """
    for i in data:
        if i['id'] == node_id:
            print('get node', i)
            return i
        else:
            if i.get('children', None):
                node = node_filter(node_id, i['children'])
                if isinstance(node, (dict,)):
                    return node


def get_time_range(request):
    """
    Build the time axis from the request parameters.
    """
    type_range = request.query_params.get('range_type', 'static')
    if type_range == 'static':
        time_range = request.query_params.get('range', '6-months')
    else:
        time_range = request.query_params.getlist('range[]', None)
        if not time_range:
            time_range = '6-months'
    period = time_period(time_range, type_range)
    time_line = timeline_generate(period, format_type='cmdb')
    # Time ticks: when the scale is hours, drop the date part.
    time_line_x = [i.split(' ')[-1] for i in time_line] if period['name'] == 'hours' else time_line
    return period, time_line, time_line_x


def compare_dict(data, old_data):
    different_list = []
    for k1 in data:
        if k1 == 'update_time':
            continue
        v1 = data.get(k1)
        v2 = old_data.get(k1)
        if v1 != v2:
            different_list.append({
                'key': k1,
                'new_value': v1,
                'old_value': v2
            })
    return different_list


def get_project_mergerequest(project: Project, cli: GitLabAPI, **params):
    """
    Fetch the merge requests of every application under the project.
    """
    mrdata = []
    git_project = [app.repo['id']
                   for app in project.microapp_set.all() if app.repo.get('id')]
    for project_id in set(git_project):
        try:
            git_project = cli.get_project(project_id)
            ok, data = cli.list_mrs(project=git_project, **params)
            if ok is False:
                continue
            mrdata.extend([i.attributes for i in data])
        except BaseException as e:
            logger.error(f'Failed to fetch the merge requests of the application, reason: {e}')
    return mrdata


def gitlab_cli(user=None, admin=False, superadmin=False, merge=False):
    """
    Get a GitLabAPI client.

    :param merge: used for branch merging; admins always use the token from the config file.
    """
    try:
        payload = {'token': GITLAB_ADMIN_TOKEN, 'oauth': False}
        cli = GitLabAPI(SOCIAL_AUTH_GITLAB_API_URL, **payload)
        return True, cli
    except BaseException as e:
        logger.warning(f'Failed to get a GitLabAPI client, reason: {e}')
        return False, f'Failed to get a GitLabAPI client, reason: {e}'


def get_deploy_image_list(app_id, appinfo_id=None, module=None, force=0):
    # Candidate images for a release.
    # Get the IDs of the related applications.
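node_filter above is a pure function, so it can be exercised standalone; the toy tree below is hypothetical and assumes the node_filter defined above is in scope.

tree = [
    {"id": 1, "name": "root", "children": [
        {"id": 2, "name": "child-a"},
        {"id": 3, "name": "child-b", "children": [{"id": 4, "name": "leaf"}]},
    ]},
]
print(node_filter(4, tree))   # -> {'id': 4, 'name': 'leaf'}
print(node_filter(99, tree))  # -> None (not found)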
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author : Charles Lai
@Contact : qqing_lai@hotmail.com
@Time : 2020/12/21 10:00 AM
@FileName: ext_fun.py
@Blog :https://imaojia.com
"""

logger = logging.getLogger('drf')


class ThirdPartyUser(object):

    def get_user(self):
        user = UserProfile.objects.get_or_create(username='thirdparty')[0]
        self.set_permission(user, self.get_role())
        return user

    def get_role(self):
        return Role.objects.get_or_create(name='thirdparty')[0]

    def get_perm(self):
        return Permission.objects.get_or_create(name='Jenkins callback', method='jenkins_callback')[0]

    def set_permission(self, user, role):
        role.permissions.set([self.get_perm().id])
        user.roles.set([role.id])


def set_redis_data(name, config):
    cache.set(f"system:{name}", config, None)


def get_redis_data(name):
    ret = cache.get(f"system:{name}")
    if not ret:
        try:
            if name == 'cicd-harbor':
                qs = SystemConfig.objects.filter(type=name)[0]
            else:
                qs = SystemConfig.objects.get(name=name)
        except BaseException as e:
            return None
        ret = json.loads(qs.config)
        set_redis_data(name, ret)
    return ret


def get_datadict(name, config=0, default_value=None):
    """
    Fetch data from the data dictionary.
    """
    try:
        qs = DataDict.objects.get(key=name)
    except BaseException as e:
        return default_value
    if config:
        ret = json.loads(qs.extra)
    else:
        ret = {'id': qs.id, 'key': qs.key, 'value': qs.value, 'desc': qs.desc}
    return ret


def check_pods(cluster_id, k8s_config, namespace, **kwargs):
    k8s = KubernetesCluster.objects.get(id=cluster_id)
    # k8s_cli returns an (ok, client) pair; unpack it before use.
    ok, cli = k8s_cli(k8s, k8s_config)
    if not ok:
        return False
    count = 3
    while count:
        ret2 = cli.get_pods(namespace, **kwargs)
        count -= 1
        if len(ret2['items']) > 0:
            return True
        else:
            check_pods(cluster_id, k8s_config, namespace, **kwargs)
    return False


def template_svc_generate(appinfo_obj):
    """
    Generate a Kubernetes Service YAML.

    ### Format:
        {
            "apiVersion": "v1",
            "kind": "Service",
            "metadata": {
                "name": "appname",
                "namespace": "env-product",
                "labels": {
                    "app": "appname"
                }
            },
            "spec": {
                "ports": [{
                    "port": 8080,
                    "targetPort": 8080,
                    "protocol": "TCP",
                    "name": "http"
                }],
                "selector": {
                    "app": "appname"
                }
            }
        }
    """
    svc_temp = DataDict.objects.filter(key='yaml.svc')
    if svc_temp.exists():
        svc_temp = json.loads(svc_temp.first().extra)
        if appinfo_obj.environment.name in svc_temp:
            svc_temp = svc_temp[appinfo_obj.environment.name]
            namespace = appinfo_obj.namespace
            svc_temp['metadata']['name'] = appinfo_obj.app.name
            svc_temp['metadata']['namespace'] = namespace
            svc_temp['metadata']['labels'] = {'app': appinfo_obj.app.name}
            labels = []
            labels.extend([{'name': 'app', 'value': appinfo_obj.app.name}])
            svc_temp['spec']['selector'] = {
                i['name']: i['value'] for i in labels}
            return True, svc_temp
    return False, None


def harbor_cli(namespace, **filters):
    try:
        harbor = SystemConfig.objects.filter(**filters).first()
        # Load the Harbor configuration.
        harbor_config = json.loads(harbor.config)
    except BaseException as e:
        logger.exception(f'Failed to create the task, reason: Harbor registry error, {e}')
        return False, f"Harbor registry error: {e}"
    # Create the Harbor project before building.
    cli = HarborAPI(url=harbor_config['url'], username=harbor_config['user'],
                    password=harbor_config['password'])
    try:
        cli.create_project(
            namespace, public=harbor_config.get('public', False))
    except BaseException as e:
        pass
    return True, harbor_config


def k8s_cli(k8s, k8s_config):
    try:
        if k8s_config['type'] == 'basic':
            # basic auth or token auth
            k8s_config.pop('config', None)
            k8s_config.pop('type', None)
            cli = K8sAPI(**k8s_config)
        else:
            eks = None
            eks_token = None
            k8s_config = yaml.safe_load(k8s_config['config'])
            if k8s.idc.type == 1 and k8s.idc.supplier.split('.')[-1] == 'aws':
                return False, 'not supported.'
            cli = K8sAPI(k8s_config=k8s_config, api_key=eks_token, eks=eks)
        return True, cli
    except BaseException as e:
        return False, str(e)


def template_generate(appinfo_obj: AppInfo, image=None, partial_deploy_replicas: int = 0):
    """
    Generate a Kubernetes Deployment YAML.
    """

    def health_lifecycle_generate(item, enable=True):
        _c = {}
        for i in template[item]['data']:
            _x = {}
            if i.get('enable', enable):
                for j in i['items']:
                    if '__' in j['name']:
                        _t = j['name'].split('__')
                        _value = j['value']
                        if j['name'] == 'exec__command':
                            _value = ["sh", "-c", j['value']]
                        if _x.get(_t[0], None):
                            _x[_t[0]][_t[1]] = _value
                        else:
                            _x[_t[0]] = {_t[1]: _value}
                    else:
                        _x[j['name']] = j['value']
            _c[i['name']] = _x
        return _c

    def container_generate(container_data):
        containers = []
        for i in container_data:
            if i.get('enable', None):
                container = get_datadict(i['key'], config=1)
                if not container:
                    container = i['extra']
                containers.append(container)
        return containers

    language_obj = DevLanguage.objects.get(name=appinfo_obj.app.language)
    project_config = ProjectConfig.objects.filter(project_id=appinfo_obj.app.project.id,
                                                  environment_id=appinfo_obj.environment.id)
    namespace = appinfo_obj.namespace
    harbor_config = get_redis_data('cicd-harbor')
    harbor_url = harbor_config['url'].split('://')[1]
    image = f"{harbor_url}/{image}"

    template = {}
    # Template priority:
    # application module -> application -> project -> environment
    if project_config.first():
        project_template = project_config.first().template
        for k, v in project_template.items():
            if v and isinstance(v, (dict,)):
                if v.get('custom', False) is False:
                    if appinfo_obj.environment.template.get(k, None):
                        template[k] = appinfo_obj.environment.template[k]
                else:
                    if project_template.get(k, None):
                        template[k] = project_template[k]

    microapp_template = appinfo_obj.app.template
    for k, v in microapp_template.items():
        if '_on' in k and v:
            _k = k.rstrip('_on')
            if microapp_template.get(_k, None):
                template[_k] = microapp_template[_k]

    use_host_network = False
    if appinfo_obj.template.get('userHostNetwork', 0):
        use_host_network = True
    for k, v in appinfo_obj.template.items():
        if v and isinstance(v, (dict,)):
            if v.get('custom', False) and appinfo_obj.template.get(k, None):
                template[k] = appinfo_obj.template[k]

    yaml_template = {'kind': 'Deployment', 'metadata': {}, 'spec':
                     {'strategy': {}, 'template': {'metadata': {}, 'spec':
                                                   {'containers': [{'ports': [{'containerPort': 8080}], 'resources': []}],
                                                    'imagePullSecrets': [{'name': 'loginharbor'}],
                                                    'terminationGracePeriodSeconds': 120}
                                                   }
                      }
                     }

    try:
        tz = appinfo_obj.app.project.product.region.extra['timezone']
    except BaseException as e:
        tz = 'Asia/Shanghai'
    try:
        if template.get('strategy', None):
            for i in template['strategy']['data']:
                if i['key'] in ['maxSurge', 'maxUnavailable']:
                    if yaml_template['spec']['strategy'].get('rollingUpdate', None) is None:
                        yaml_template['spec']['strategy']['rollingUpdate'] = {}
                    yaml_template['spec']['strategy']['rollingUpdate'][i['key']] = f"{i['value']}%"
                else:
                    yaml_template['spec'][i['key']] = i['value']
        _d = {}
        for i in template['resources']['data']:
            _t = i['key'].split('_')
            if _d.get(_t[0], None):
                _d[_t[0]][_t[1]] = f"{i['value']}{i['slot']}"
            else:
                _d[_t[0]] = {_t[1]: f"{i['value']}{i['slot']}"}
        yaml_template['spec']['template']['spec']['containers'][0]['resources'] = _d

        yaml_template['metadata']['name'] = appinfo_obj.app.name
        yaml_template['metadata']['namespace'] = namespace
        yaml_template['spec']['template']['spec']['containers'][0]['name'] = appinfo_obj.app.name
        yaml_template['spec']['template']['spec']['containers'][0]['image'] = image
        command = appinfo_obj.app.template.get(
            'command', None) or language_obj.labels.get('command', None)
        if command:
            if command.startswith('./'):
                yaml_template['spec']['template']['spec']['containers'][0]['command'] = [
                    command]
            else:
                yaml_template['spec']['template']['spec']['containers'][0]['command'] = [
                    'sh', '-c', command]

        # Priority: application module > application > preset > language defaults
        labels = template['label']['data']
        labels.extend([{'name': 'app', 'value': appinfo_obj.app.name}])
        yaml_template['spec']['template']['metadata']['labels'] = {
            i['name']: i['value'] for i in labels}
        yaml_template['spec']['template']['metadata']['labels'][
            'status-app-name-for-ops-platform'] = appinfo_obj.app.name
        yaml_template['spec']['selector'] = {
            'matchLabels': {i['name']: i['value'] for i in labels}}

        selectors = template['selector']['data']
        yaml_template['spec']['template']['spec']['nodeSelector'] = {
            i['name']: i['value'] for i in selectors}

        if 'annotations' not in yaml_template['spec']['template']['metadata']:
            yaml_template['spec']['template']['metadata']['annotations'] = {}
        for i in template['prometheus']['data']:
            yaml_template['spec']['template']['metadata'][
                'annotations'][f'prometheus.io/{i["name"]}'] = i['value']
        if 'prometheus.io/path' in yaml_template['spec']['template']['metadata']['annotations']:
            yaml_template['spec']['template']['metadata']['annotations'][
                'prometheus.io/app_product'] = appinfo_obj.app.project.product.name
            yaml_template['spec']['template']['metadata']['annotations'][
                'prometheus.io/app_env'] = appinfo_obj.environment.name
            yaml_template['spec']['template']['metadata']['annotations'][
                'prometheus.io/app_project'] = appinfo_obj.app.project.name

        # Environment variables
        envs = [{'name': 'TZ', 'value': tz}]
        envs.extend(template['env']['data'])
        envs.extend([
            {'name': '_RESTART', 'value': datetime.now().strftime(
                '%Y%m%d%H%M%S')},  # The _RESTART variable forces a deployment rollout.
            {'name': 'PRODUCT_NAME', 'value': appinfo_obj.app.project.product.name},
            {'name': 'PROJECT_NAME', 'value': appinfo_obj.app.project.name},
            {'name': 'APPNAME', 'value': appinfo_obj.app.name},
            {'name': 'APPID', 'value': appinfo_obj.app.appid},
            {'name': 'ENV', 'value': appinfo_obj.environment.name},
            {'name': 'POD_NAMESPACE', 'value': namespace}
        ])
        envs = list({i['name']: i for i in envs}.values())
        for i in envs:
            try:
                env_value = i.get('value', None)
                cmname = i.pop('cmname', None)
                cmkey = i.pop('cmkey', None)
                if env_value:
                    env_value = env_value.lstrip('"').rstrip(
                        '"').lstrip("'").rstrip("'")
                i.pop('value', None)
                i['name'] = i['name'].lstrip('"').rstrip(
                    '"').lstrip("'").rstrip("'")
                if i.get('valueFrom', None) == 'configMapKeyRef':
                    i['valueFrom'] = {'configMapKeyRef': {
                        'name': cmname, 'key': cmkey}}
                else:
                    i['value'] = env_value
                    i['valueFrom'] = None
            except BaseException as e:
                pass
        yaml_template['spec']['template']['spec']['containers'][0]['env'] = envs

        if template.get('health', False):
            _d = health_lifecycle_generate('health', True)
            for k, v in _d.items():
                yaml_template['spec']['template']['spec']['containers'][0][k] = v
        if template.get('lifecycle', False):
            yaml_template['spec']['template']['spec']['containers'][0]['lifecycle'] = {}
            _d = health_lifecycle_generate('lifecycle', False)
            for k, v in _d.items():
                yaml_template['spec']['template']['spec']['containers'][0]['lifecycle'][k] = v

        _vo_mount = [{'mountPath': '/data/logs',
                      'name': 'logs', 'readOnly': False}]
        _volumes = [{'name': 'logs', 'type': 'Directory', 'hostPath': {
            'path': f'/data/{appinfo_obj.environment.name}-applogs/{appinfo_obj.app.project.name}/'}}]
        if template.get('storage', None):
            for k, v in template['storage']['data'].items():
                for i in v:
                    _x = {}
                    for m, n in i.items():
                        if isinstance(n, (str,)):
                            n = n.replace('${APPNAME}', appinfo_obj.app.name)
                        if '_' in m:
                            _t = m.split('_')
                            if _x.get(_t[0], None):
                                _x[_t[0]][_t[1]] = n
                            else:
                                _x[_t[0]] = {_t[1]: n}
                        else:
                            _x[m] = n
                    _t = {'mountPath': _x['mount'], 'name': _x['name'],
                          'readOnly': True if _x.get('mode', None) == 'ReadOnly' else False}
                    if _x.get('file', None):
                        _t['subPath'] = _x['configMap']['items'][0]['key']
                    _vo_mount.append(_t)
                    _mode = _x.pop('mode', None)
                    _x.pop('file', None)
                    _x.pop('mount', None)
                    if _x.get('configMap', None):
                        _x['configMap']['defaultMode'] = 0o600 if _mode == 'ReadOnly' else 0o755
                    _volumes.append(_x)
        yaml_template['spec']['template']['spec']['containers'][0]['volumeMounts'] = _vo_mount
        yaml_template['spec']['template']['spec']['volumes'] = _volumes
        if use_host_network:
            yaml_template['spec']['template']['spec']['hostNetwork'] = True
        partial_deploy_yaml_template = None

    except BaseException as e:
        logger.exception(f'generate yaml err {e.__class__} {e}')
        return {'ecode': 500, 'message': str(e)}

    # Multi-container handling
    if appinfo_obj.template.get('containers_custom', None):
        containers = container_generate(
            appinfo_obj.template.get('containers', []))
    else:
        containers = container_generate(
            project_config.first().template.get('containers', []))
    yaml_template['spec']['template']['spec']['containers'].extend(containers)
    ret = {'ecode': 200, 'image': image, 'yaml': yaml_template}

    if partial_deploy_yaml_template:
        ret['partial_deploy_yaml'] = partial_deploy_yaml_template
    return ret


def get_members(obj):
    team_members = [j for i in obj.team_members.values() for j in i]
    return list(set(team_members))


def get_permission_from_role(request):
    try:
        perms = request.user.roles.values(
            'permissions__method',
        ).distinct()
        return [p['permissions__method'] for p in perms]
    except AttributeError:
        return []


def get_headers(request=None):
    """
    Function: get_headers(self, request)
    Description: To get all the headers from request
    """
    regex = re.compile('^HTTP_')
    return dict((regex.sub('', header), value) for (header, value)
                in request.META.items() if header.startswith('HTTP_'))


def mask_sensitive_data(data):
    """
    Hides sensitive keys specified in sensitive_keys settings.
    Loops recursively over nested dictionaries.
    """
    if hasattr(settings, 'DRF_API_LOGGER_EXCLUDE_KEYS'):
        if type(settings.DRF_API_LOGGER_EXCLUDE_KEYS) in (list, tuple):
            SENSITIVE_KEYS.extend(settings.DRF_API_LOGGER_EXCLUDE_KEYS)
    if type(data) != dict and type(data) != ReturnDict:
        try:
            data = json.loads(data)
        except BaseException as e:
            return data
    for key, value in data.items():
        if key in SENSITIVE_KEYS:
            data[key] = "***FILTERED***"
        if type(value) == dict:
            data[key] = mask_sensitive_data(data[key])
    return data


def time_convert(target_time):
    """
    Convert a datetime to a string.
    """
    return target_time.astimezone(pytz.timezone('Asia/Shanghai')).strftime('%Y-%m-%d %H:%M:%S+08:00')


def time_comp(target_time, **kwargs):
    """
    Time comparison; always compares in UTC.

    target_time: the target time
    kwargs: extra arguments, a time delta such as {hours: 1}, {minutes: 1} or
            {seconds: 1}; values must not be negative.
    """
    ctime = timezone.now()
    if kwargs:
        # Whether the two times fall within the expected delta.
        if target_time > ctime:
            return target_time - ctime <= timedelta(**kwargs)
        else:
            return ctime - target_time <= timedelta(**kwargs)
    # Whether the two times are equal.
    return ctime == target_time


def timeline_generate(time_range, format_type='dashboard'):
    """
    Generate a timeline from a start/end time pair.

    :params format_type: defaults to 'dashboard', a coarse format for overview
        reports; any other value uses the finer-grained format for monitoring views.
    """
    TIME_FREQNAMES = DASHBOARD_TIME_FREQNAMES
    TIME_FORMAT = DASHBOARD_TIME_FORMAT
    if format_type == 'cmdb':
        TIME_FREQNAMES = DASHBOARD_TIME_FREQNAMES_T
        TIME_FORMAT = DASHBOARD_TIME_FORMAT_T
    start_time = time_range['start_time']
    end_time = time_range['end_time']
    time_line = rrule(
        freq=TIME_FREQNAMES[time_range['name']], dtstart=start_time, until=end_time)
    return [i.strftime(TIME_FORMAT[time_range['name']]) for i in time_line]


def time_period(time_range='6-months', type_range='static', time_zone='Asia/Shanghai', name=None):
    """
    Generate the start and end times from a time range.
    """
    start_time = None
    end_time = timezone.now().astimezone(pytz.timezone(time_zone))
    if type_range == 'dynamic' and name is None:
        start_time = datetime.strptime(time_range[0], '%Y-%m-%d %H:%M:%S')
        end_time = datetime.strptime(time_range[1], '%Y-%m-%d %H:%M:%S')
        if start_time > end_time:
            start_time, end_time = end_time, start_time
        if (end_time - start_time).days >= 60:
            name = 'months'
        elif (end_time - start_time).days >= 2:
            name = 'days'
        elif (end_time - start_time).days >= 1 or (end_time - start_time).seconds > 60 * 60:
            name = 'hours'
        else:
            name = 'minutes'
        return {'name': name, 'start_time': start_time, 'end_time': end_time}

    if type_range == 'static':
        _time = time_range.split('-')
        if _time[-1] == 'week':
            start_time = end_time - relativedelta(days=end_time.weekday(), hours=end_time.hour,
                                                  minutes=end_time.minute, seconds=end_time.second,
                                                  microseconds=end_time.microsecond)
            return {'name': 'days', 'start_time': start_time, 'end_time': end_time}
        if _time[-1] == 'lastweek':
            start_time = end_time - relativedelta(days=end_time.weekday() + 7, hours=end_time.hour,
                                                  minutes=end_time.minute, seconds=end_time.second,
                                                  microseconds=end_time.microsecond)
            end_time = end_time - relativedelta(days=end_time.weekday(), hours=end_time.hour,
                                                minutes=end_time.minute, seconds=end_time.second,
                                                microseconds=end_time.microsecond)
            return {'name': 'days', 'start_time': start_time, 'end_time': end_time}
        if _time[-1] in ['today', 'yesterday']:
            start_time = end_time - relativedelta(hours=end_time.hour, minutes=end_time.minute,
                                                  seconds=end_time.second, microseconds=end_time.microsecond)
            if _time[-1] == 'yesterday':
                end_time = start_time
                start_time = end_time - relativedelta(days=1)
            return {'name': 'hours', 'start_time': start_time, 'end_time': end_time}
        name = _time[1]
        if name is None:
            if _time[1] in ['years', 'months']:
                name = 'months'
            if _time[1] == 'months' and int(_time[0]) < 2:
                name = 'days'
            if _time[1] == 'days' and int(_time[0]) < 2:
                name = 'hours'
        start_time = end_time + relativedelta(**{_time[1]: -int(_time[0])})
        return {'name': name, 'start_time': start_time, 'end_time': end_time}


def extend_jenkins(data, env):
    jenkins = get_redis_data('cicd-jenkins')
    app = AppInfo.objects.filter(id=data['id'])[0]
    category = DataDict.objects.get(key=app.app.category)
    job_name = app.jenkins_jobname
    jenkins_cli = GlueJenkins(jenkins.get('url', 'http://localhost'),
                              username=jenkins.get('user', 'admin'),
                              password=jenkins.get('password', None))
    try:
        view_xml_config = f'''<?xml version="1.0" encoding="UTF-8"?>
<hudson.model.ListView>
  <name>{app.app.project.alias}{env.alias}</name>
  <filterExecutors>false</filterExecutors>
  <filterQueue>false</filterQueue>
  <properties class="hudson.model.View$PropertyList"/>
  <jobNames>
    <comparator class="hudson.util.CaseInsensitiveComparator"/>
  </jobNames>
  <jobFilters/>
  <columns>
    <hudson.views.StatusColumn/>
    <hudson.views.WeatherColumn/>
    <hudson.views.JobColumn/>
    <jenkins.branch.DescriptionColumn/>
    <hudson.views.LastSuccessColumn/>
    <hudson.views.LastFailureColumn/>
    <hudson.views.LastDurationColumn/>
    <hudson.views.BuildButtonColumn/>
  </columns>
  <includeRegex>{env.name.lower()}-.*-{app.app.project.name.lower()}-.*</includeRegex>
</hudson.model.ListView>'''
        jenkins_cli.create_view(
            f'{app.app.project.alias}{env.alias}', view_xml_config)
    except BaseException as e:
        pass

    try:
        config_xml = convert_xml_to_str_with_pipeline(jenkins['xml'],
                                                      jenkins['pipeline']['http_url_to_repo'],
                                                      jenkins['gitlab_credit'],
                                                      app.app.alias,
                                                      f'{app.app.language}/Jenkinsfile')
        if not jenkins_cli.job_exists(job_name):
            jenkins_cli.create_job(name=job_name, config_xml=config_xml)
        else:
            jenkins_cli.reconfig_job(name=job_name, config_xml=config_xml)
    except Exception as e:
        logger.error(f"Failed to create Jenkins job: {job_name} ERROR: {e}")


def get_celery_tasks():
    """
    List the registered Celery tasks.
    """
    current_app.loader.import_default_modules()
    tasks = list(sorted(name for name in current_app.tasks
                        if not name.startswith('celery.')))
    return tasks


def is_chinese(string):
    """
    Check whether the string contains any Chinese characters.

    :param string: the string to check
    :return: bool
    """
    for ch in string:
        if u'\u4e00' <= ch <= u'\u9fff':
            return True
    return False


def get_word_list(string):
    """
    Split a string: Chinese characters and '-' are split into single
    characters, everything else into whole words.
    """
    res = re.compile(r"([\u4e00-\u9fa5\-])")
    return [i for i in res.split(string.lower()) if len(i.strip()) > 0 and i != '-']


def devlanguage_template_manage(instance, filename, user=None, content=None, action='retrieve'):
    jenkins = get_redis_data('cicd-jenkins')
    ok, cli = gitlab_cli(admin=True)
    if not ok:
        return False, cli
    project_id = jenkins['pipeline'].get('id', None)
    if not project_id:
        return False, 'Failed to get the pipeline, please check the Jenkins configuration.'
    project = cli.get_project(project_id)
    items = project.repository_tree(path=instance.name)
    try:
        if action == 'update':
            if filename in [i['name'] for i in items]:
                # The file exists, update it.
                f = project.files.get(
                    f"{instance.name}/{filename}", ref='master')
                f.content = content
                f.save(
                    branch='master', commit_message=f'Update {instance.name} {filename} by {user.username}')
            else:
                # The file does not exist, create it.
                logger.info(f'File {instance.name}/{filename} does not exist, creating it.')
                project.files.create({'file_path': f"{instance.name}/{filename}",
                                      'branch': 'master',
                                      'content': content,
                                      'author_email': user.email,
                                      'author_name': user.username,
                                      'commit_message': f'Create {instance.name} {filename} by {user.username}'})
        content = project.files.raw(
            f"{instance.name}/{filename}", ref='master')
        return True, content
    except GitlabGetError as e:
        logger.info(f'Fetch failed: {e}')
        if e.response_code == 404:
            logger.info(f'File {instance.name}/{filename} does not exist.')
            return True, ''
    except BaseException as e:
        logger.error(f'GitLab language template error, reason: {e}')
        return False, f'GitLab language template error, reason: {e}'


def snake_case(x):
    """
    Convert CamelCase to snake_case.
    """
    term_exclude = ['OS', 'GPU', 'DB', 'IA', 'IP', 'RR', 'TTL',
                    'SLB', 'CPU', 'MEMORY', 'QPS']
    for i in term_exclude:
        x = x.replace(i, i.lower())
    return re.sub(r'(?P<key>[A-Z])', r'_\g<key>', x).lower().strip('_')


def node_filter(node_id, data):
    """
    Find a node in a tree.

    :params: node_id int  the node ID
    :params: data list  the list of nodes
    """
    for i in data:
        if i['id'] == node_id:
            print('get node', i)
            return i
        else:
            if i.get('children', None):
                node = node_filter(node_id, i['children'])
                if isinstance(node, (dict,)):
                    return node


def get_time_range(request):
    """
    Build the time axis from the request parameters.
    """
    type_range = request.query_params.get('range_type', 'static')
    if type_range == 'static':
        time_range = request.query_params.get('range', '6-months')
    else:
        time_range = request.query_params.getlist('range[]', None)
        if not time_range:
            time_range = '6-months'
    period = time_period(time_range, type_range)
    time_line = timeline_generate(period, format_type='cmdb')
    # Time ticks: when the scale is hours, drop the date part.
    time_line_x = [i.split(' ')[-1] for i in time_line] if period['name'] == 'hours' else time_line
    return period, time_line, time_line_x


def compare_dict(data, old_data):
    different_list = []
    for k1 in data:
        if k1 == 'update_time':
            continue
        v1 = data.get(k1)
        v2 = old_data.get(k1)
        if v1 != v2:
            different_list.append({
                'key': k1,
                'new_value': v1,
                'old_value': v2
            })
    return different_list


def get_project_mergerequest(project: Project, cli: GitLabAPI, **params):
    """
    Fetch the merge requests of every application under the project.
    """
    mrdata = []
    git_project = [app.repo['id']
                   for app in project.microapp_set.all() if app.repo.get('id')]
    for project_id in set(git_project):
        try:
            git_project = cli.get_project(project_id)
            ok, data = cli.list_mrs(project=git_project, **params)
            if ok is False:
                continue
            mrdata.extend([i.attributes for i in data])
        except BaseException as e:
            logger.error(f'Failed to fetch the merge requests of the application, reason: {e}')
    return mrdata


def gitlab_cli(user=None, admin=False, superadmin=False, merge=False):
    """
    Get a GitLabAPI client.

    :param merge: used for branch merging; admins always use the token from the config file.
    """
    try:
        payload = {'token': GITLAB_ADMIN_TOKEN, 'oauth': False}
        cli = GitLabAPI(SOCIAL_AUTH_GITLAB_API_URL, **payload)
        return True, cli
    except BaseException as e:
        logger.warning(f'Failed to get a GitLabAPI client, reason: {e}')
        return False, f'Failed to get a GitLabAPI client, reason: {e}'


def get_deploy_image_list(app_id, appinfo_id=None, module=None, force=0):
    # Candidate images for a release.
    # Get the IDs of the related applications.
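A quick check of snake_case shows why its term_exclude list exists: the excluded acronyms are lower-cased first so they are not split letter by letter. The function is copied verbatim from ext_fun above so this example runs standalone.

import re

def snake_case(x):
    """Convert CamelCase to snake_case (copied from ext_fun above)."""
    term_exclude = ['OS', 'GPU', 'DB', 'IA', 'IP', 'RR', 'TTL',
                    'SLB', 'CPU', 'MEMORY', 'QPS']
    for i in term_exclude:
        x = x.replace(i, i.lower())
    return re.sub(r'(?P<key>[A-Z])', r'_\g<key>', x).lower().strip('_')

print(snake_case('KubernetesDeploy'))  # kubernetes_deploy
print(snake_case('CPUUsage'))          # cpu_usage ('CPU' is lower-cased first)
print(snake_case('HostIP'))            # hostip   ('IP' is lower-cased first)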
app = MicroApp.objects.get(id=app_id)
14
2023-12-13 03:09:32+00:00
24k
MarilynKeller/aitviewer-skel
aitviewer/renderables/sdf.py
[ { "identifier": "BoundingBoxes", "path": "aitviewer/renderables/bounding_boxes.py", "snippet": "class BoundingBoxes(Node):\n \"\"\"\n Draw bounding boxes.\n \"\"\"\n\n def __init__(self, vertices, thickness=0.005, color=(0.0, 0.0, 1.0, 1.0), **kwargs):\n \"\"\"\n Initializer.\n...
import numpy as np from skimage import measure from aitviewer.renderables.bounding_boxes import BoundingBoxes from aitviewer.renderables.lines import Lines from aitviewer.renderables.meshes import Meshes from aitviewer.scene.node import Node from aitviewer.utils.decorators import hooked
19,602
# Copyright (C) 2023 ETH Zurich, Manuel Kaufmann, Velko Vechev, Dario Mylonopoulos class SDF(Node): """ Renderable that can be used to draw level sets of a dense SDF volume meshed using marching cubes. This renderable internally uses the marching cubes algorithm from skimage. For a faster marching cubes implementation see the Volume renderable. """ def __init__( self, volume, size=(1, 1, 1), level=0.0, color=(0.7, 0.7, 0.7, 1.0), level_sets=None, level_set_colors=None, mc_step_size=1, **kwargs, ): """Initializer. :param volume: np array of shape (X, Y, Z) of signed distance values :param size: size of the volume in local units. :param level: the level set used for the main mesh. :param color: color of the main mesh. :param level_sets: a list or array of additional level set values to display. :param level_set_colors: a list or array of shape (L, 4) of the same length as the level_set parameter with colors to use for the additional level sets. :param mc_step_size: step size used for marching cubes. :param **kwargs: arguments forwarded to the Node constructor. """ assert len(volume.shape) == 3 and len(size) == 3 kwargs["gui_material"] = False super().__init__(**kwargs) self.volume = volume self.size = np.array((size), np.float32) # Mesh. verts, faces, normals, _ = measure.marching_cubes( volume, level, spacing=self.size / (np.array(self.volume.shape) - 1.0), step_size=mc_step_size ) self.mesh = Meshes(verts, faces, vertex_normals=-normals, color=color, name="Mesh") # Level sets. self.level_sets: list[Meshes] = [] if level_sets is not None: if level_set_colors is not None: assert len(level_sets) == len(level_set_colors) for i, s in enumerate(level_sets): verts, faces, normals, _ = measure.marching_cubes( volume, s, spacing=self.size / (np.array(self.volume.shape) - 1.0), step_size=mc_step_size ) shell = Meshes(verts, faces, vertex_normals=-normals, name=f"Level {s:.03f}", cast_shadow=False) if level_set_colors is not None: shell.color = tuple(level_set_colors[i]) shell.clip_control = np.array((1, 1, 1)) shell.clip_value = self.size.copy() shell.backface_culling = False self.level_sets.append(shell) # Bounding box. self.bounding_box = BoundingBoxes.from_min_max_diagonal( np.array([[0.0, 0.0, 0.0]]), np.array([self.size], dtype=np.float32), color=(0, 0, 0, 1), name="Bounding Box", gui_affine=False, ) # Clip plane lines. self.clip_lines = [] for i, axis in enumerate(["X", "Y", "Z"]): s0 = self.size[(i + 0) % 3] s1 = self.size[(i + 1) % 3] s2 = self.size[(i + 2) % 3] lines = np.array( ( [s0, 0, 0], [s0, s1, 0], [s0, s1, s2], [s0, 0, s2], [s0, 0, 0], ), dtype=np.float32, ) lines = np.roll(lines, axis=1, shift=(0, i)) color = np.array([0, 0, 0, 1]) color[i] = 1
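The SDF renderable above accepts any dense signed-distance grid; a sphere is the simplest smoke test. The sketch below builds one with NumPy and instantiates the class with two extra level sets. Displaying it would follow the usual aitviewer pattern (create a Viewer, add the node to its scene, run), which is outside this crop; the grid resolution and radius are arbitrary toy values.

import numpy as np

from aitviewer.renderables.sdf import SDF

# Dense SDF of a sphere of radius 0.3 centered in a unit cube, on a 64^3 grid.
res = 64
xs = np.linspace(0.0, 1.0, res, dtype=np.float32)
x, y, z = np.meshgrid(xs, xs, xs, indexing="ij")
volume = np.sqrt((x - 0.5) ** 2 + (y - 0.5) ** 2 + (z - 0.5) ** 2) - 0.3

# Zero level set plus two offset shells, each with its own RGBA color.
sdf = SDF(
    volume,
    size=(1, 1, 1),
    level=0.0,
    level_sets=[0.05, 0.1],
    level_set_colors=np.array([[1.0, 0.0, 0.0, 0.5],
                               [0.0, 0.0, 1.0, 0.5]]),
    name="Sphere SDF",
)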
self.clip_lines.append(Lines(lines, cast_shadow=False, color=color, name=f"Clip {axis}", gui_affine=False))
1
2023-12-07 16:13:50+00:00
24k
nexB/dejacode
dje/admin.py
[ { "identifier": "IS_FILTER_LOOKUP_VAR", "path": "dje/filters.py", "snippet": "IS_FILTER_LOOKUP_VAR = \"_filter_lookup\"" }, { "identifier": "CreatedByListFilter", "path": "dje/filters.py", "snippet": "class CreatedByListFilter(filters.SimpleListFilter):\n \"\"\"\n Filter by the use...
import csv import operator from collections import OrderedDict from copy import copy from functools import reduce from django import forms from django.conf import settings from django.contrib import admin from django.contrib import messages from django.contrib.admin.helpers import ACTION_CHECKBOX_NAME from django.contrib.admin.options import IS_POPUP_VAR from django.contrib.admin.sites import AdminSite from django.contrib.admin.templatetags.admin_urls import add_preserved_filters from django.contrib.admin.utils import lookup_spawns_duplicates from django.contrib.admin.utils import unquote from django.contrib.admin.views.main import ChangeList from django.contrib.admin.widgets import AdminDateWidget from django.contrib.auth.admin import GroupAdmin from django.contrib.auth.admin import UserAdmin from django.contrib.auth.models import Group from django.contrib.auth.models import Permission from django.contrib.auth.views import LogoutView from django.contrib.contenttypes.admin import GenericTabularInline from django.contrib.contenttypes.models import ContentType from django.core import checks from django.core.exceptions import FieldDoesNotExist from django.core.exceptions import FieldError from django.core.exceptions import PermissionDenied from django.db import models from django.forms.formsets import DELETION_FIELD_NAME from django.http import HttpResponse from django.http.request import QueryDict from django.shortcuts import get_object_or_404 from django.shortcuts import redirect from django.template.loader import render_to_string from django.template.response import TemplateResponse from django.urls import path from django.urls import reverse from django.utils.encoding import force_str from django.utils.html import format_html from django.utils.html import format_html_join from django.utils.http import urlencode from django.utils.translation import gettext as _ from django.views.generic import RedirectView from django_registration.backends.activation.views import RegistrationView from dje.filters import IS_FILTER_LOOKUP_VAR from dje.filters import CreatedByListFilter from dje.filters import DataspaceFilter from dje.filters import HistoryCreatedActionTimeListFilter from dje.filters import HistoryModifiedActionTimeListFilter from dje.filters import LimitToDataspaceListFilter from dje.filters import MissingInFilter from dje.forms import DataspaceAdminForm from dje.forms import DataspacedAdminForm from dje.forms import DejaCodeAuthenticationForm from dje.importers import import_view from dje.list_display import AsURL from dje.mass_update import mass_update_action from dje.models import Dataspace from dje.models import DataspaceConfiguration from dje.models import DejacodeUser from dje.models import ExternalReference from dje.models import ExternalSource from dje.models import History from dje.models import HistoryFieldsMixin from dje.models import is_dataspace_related from dje.notification import send_notification_email from dje.notification import send_notification_email_on_queryset from dje.permissions import get_protected_fields from dje.search import advanced_search from dje.utils import CHANGELIST_LINK_TEMPLATE from dje.utils import class_wrap from dje.utils import construct_changes_details_message from dje.utils import get_previous_next from dje.utils import group_by from dje.utils import has_permission from dje.utils import queryset_to_changelist_href from dje.views import ActivityLog from dje.views import clone_dataset_view from dje.views import docs_models_view from dje.views import 
manage_copy_defaults_view from dje.views import manage_tab_permissions_view from dje.views import object_compare_view from dje.views import object_copy_view from axes.admin import AccessAttemptAdmin from axes.models import AccessAttempt
19,188
( "Application Process Settings", { "fields": ( "set_usage_policy_on_new_component_from_licenses", "enable_package_scanning", "update_packages_from_scan", "enable_purldb_access", "enable_vulnerablecodedb_access", ) }, ), ) search_fields = ("name",) inlines = [DataspaceConfigurationInline] form = DataspaceAdminForm change_form_template = "admin/dje/dataspace/change_form.html" change_list_template = "admin/change_list_extended.html" def has_change_permission(self, request, obj=None): """ Bypass the ReferenceOnlyPermissions to allow regular Dataspace admins, with the right permission, to edit their own Dataspace. """ return super(admin.ModelAdmin, self).has_change_permission(request, obj) def get_readonly_fields(self, request, obj=None): """Make Dataspace.name field readonly on edit except for reference Dataspace superusers.""" readonly_fields = super().get_readonly_fields(request, obj) user = request.user if obj and not (user.dataspace.is_reference and user.is_superuser): readonly_fields += ("name",) return readonly_fields def get_urls(self): info = self.model._meta.app_label, self.model._meta.model_name urls = [ path( "<pk>/clonedataset/", self.admin_site.admin_view(clone_dataset_view), name="{}_{}_clonedataset".format(*info), ), path( "<pk>/tab_permissions/", self.admin_site.admin_view(manage_tab_permissions_view), name="{}_{}_tab_permissions".format(*info), ), path( "<pk>/copy_defaults/", self.admin_site.admin_view(manage_copy_defaults_view), name="{}_{}_copy_defaults".format(*info), ), ] return urls + super().get_urls() def get_queryset(self, request): """ Limit the QuerySet to the current user Dataspace. + the Reference one. If the user Dataspace is the Reference then show all. """ qs = super().get_queryset(request) if not request.user.dataspace.is_reference: qs = qs.filter(id=request.user.dataspace_id) return qs def get_actions(self, request): """Remove the bulk delete action, it does not make sense for Dataspace.""" actions = super().get_actions(request) if "delete_selected" in actions: del actions["delete_selected"] return actions def changeform_view(self, request, object_id=None, form_url="", extra_context=None): extra_context = extra_context or {} extra_context["template_dataspace"] = settings.TEMPLATE_DATASPACE return super().changeform_view(request, object_id, form_url, extra_context) class ChildRelationshipInline(DataspacedFKMixin, admin.TabularInline): fk_name = "parent" extra = 0 classes = ("grp-collapse grp-open",) raw_id_fields = ("child",) autocomplete_lookup_fields = {"fk": ["child"]} verbose_name = _("Child") class ExternalReferenceInline(DataspacedFKMixin, GenericTabularInline): model = ExternalReference extra = 0 classes = ("grp-collapse grp-open",) @admin.register(ExternalSource, site=dejacode_site) class ExternalSourceAdmin(DataspacedAdmin): def references(self, obj): """ Return links to the content_object changelist of ExternalReference instances, for the given ExternalSource instance, grouped per ContentType. 
""" changelist_links = [] queryset = obj.externalreference_set grouped = group_by(queryset, "content_type", count_on="object_id", distinct=True) for value in grouped: model_class = ContentType.objects.get(id=value["content_type"]).model_class() opts = model_class._meta url = reverse(f"admin:{opts.app_label}_{opts.model_name}_changelist") params = {EXTERNAL_SOURCE_LOOKUP: obj.id} href = f"{url}?{urlencode(params)}" changelist_link = format_html( CHANGELIST_LINK_TEMPLATE, href, value["count"], opts.verbose_name_plural ) changelist_links.append([changelist_link]) html_list = "<ul>{}</ul>".format(format_html_join("", "<li>{}</li>", changelist_links))
# # Copyright (c) nexB Inc. and others. All rights reserved. # DejaCode is a trademark of nexB Inc. # SPDX-License-Identifier: AGPL-3.0-only # See https://github.com/nexB/dejacode for support or download. # See https://aboutcode.org for more information about AboutCode FOSS projects. # EXTERNAL_SOURCE_LOOKUP = "external_references__external_source_id" ADDITION = History.ADDITION CHANGE = History.CHANGE DELETION = History.DELETION class DejaCodeAdminSite(AdminSite): login_template = "registration/login.html" login_form = DejaCodeAuthenticationForm site_title = _("DejaCode Administration") site_header = _("DejaCode Administration") index_title = _("DejaCode Administration") empty_value_display = "" def get_urls(self): """Override the admin:logout and admin:password_change views to the default ones.""" urls = [ path("logout/", LogoutView.as_view(next_page="login"), name="logout"), path( "password_change/", RedirectView.as_view(url="/account/password_change/", permanent=True), name="password_change", ), path("docs/models/", docs_models_view, name="docs_models"), ] return urls + super().get_urls() dejacode_site = DejaCodeAdminSite() @admin.display(description="") def get_hierarchy_link(obj): """Return a link to the Hierarchy view if the obj has at least 1 parent or 1 child.""" if obj.has_parent_or_child(): return format_html( '<a href="{}#hierarchy" target="_blank" class="hierarchy-icon"' ' title="Hierarchy">&nbsp;</a>', obj.get_absolute_url(), ) def get_additional_information_fieldset(pre_fields=None): fields = ( "dataspace", "uuid", "created_date", "created_by", "last_modified_date", "last_modified_by", ) if pre_fields: fields = pre_fields + fields return ("Additional Information", {"classes": ("grp-collapse grp-closed",), "fields": fields}) class ReferenceOnlyPermissions: def has_add_permission(self, request): """Limits the addition to Reference dataspace users.""" perm = super().has_add_permission(request) return perm and request.user.dataspace.is_reference def has_change_permission(self, request, obj=None): """Limits the change to Reference dataspace users.""" perm = super().has_change_permission(request, obj) return perm and request.user.dataspace.is_reference def has_delete_permission(self, request, obj=None): """Limits the deletion to Reference dataspace users.""" perm = super().has_delete_permission(request, obj) return perm and request.user.dataspace.is_reference def has_view_permission(self, request, obj=None): perm = super().has_view_permission(request, obj) return perm and request.user.dataspace.is_reference class DataspacedFKMixin: """ Limit the QuerySet of ForeignKeys to the current Dataspace, or to the parent object in case of Inlines. On ADDITION, the Dataspace is taken from the User On MODIFICATION, it's taken on the current object instance or parent instance in case of Inlines. This class can be applied to ModelAdmins and Inlines. The declared limit_choices_to on the model field will be respected. """ # The QuerySet for the fields in this list will be scoped by the Model content_type content_type_scope_fields = [] def formfield_for_foreignkey(self, db_field, request=None, **kwargs): # If a QuerySet was given in the kwargs of the calling method, we then # assume that the filtering was done and we skip further processing. 
qs = kwargs.get("queryset", None) if qs is not None: return super().formfield_for_foreignkey(db_field, request, **kwargs) related_model = db_field.related_model if is_dataspace_related(related_model): # No instance, ADDITION, get dataspace from user if not getattr(request, "_object", None): dataspace = request.user.dataspace # Parent instance, MODIFICATION, dataspace from instance else: dataspace = request._object.dataspace kwargs["queryset"] = db_field.related_model.objects.scope(dataspace).complex_filter( db_field.remote_field.limit_choices_to ) if db_field.name in self.content_type_scope_fields: kwargs["queryset"] = kwargs["queryset"].filter( content_type=ContentType.objects.get_for_model(self.model) ) return super().formfield_for_foreignkey(db_field, request, **kwargs) class ProtectedFieldsMixin: def get_readonly_fields(self, request, obj=None): """Add field level permissions.""" readonly_fields = super().get_readonly_fields(request, obj) protected_fields = get_protected_fields(self.model, request.user) if protected_fields: readonly_fields += tuple(protected_fields) return readonly_fields class ChangelistPopupPermissionMixin: """ Allow the changelist view access in popup mode for users without change permission. In the case of raw_id_fields feature, this view need be be available to select the related object. This mixin bypass the limitation in Django: https://code.djangoproject.com/ticket/11561 Only the changelist is available, the form is never accessible. """ def has_change_permission(self, request, obj=None): if obj is None and IS_POPUP_VAR in request.GET: return True return super().has_change_permission(request, obj) class ProhibitDataspaceLookupMixin: """ Prohibit all `dataspace` related lookups. Remove the possibility to look into other Dataspaces. """ def lookup_allowed(self, lookup, value): if lookup.startswith("dataspace"): return False return super().lookup_allowed(lookup, value) def check(self, **kwargs): errors = super().check(**kwargs) has_dataspace_filter = DataspaceFilter in self.list_filter if has_dataspace_filter: errors.append( checks.Error(f"Remove {DataspaceFilter} from {self}.list_filter", obj=self) ) return errors def get_queryset(self, request): return super().get_queryset(request).scope_for_user(request.user) class AdvancedSearchAdminMixin: def get_search_results(self, request, queryset, search_term): """Replace default search with advanced system.""" use_distinct = False search_fields = self.get_search_fields(request) if search_fields and search_term: filters = [] try: filters = advanced_search(search_term, search_fields) except FieldError as e: messages.error(request, e) except ValueError as e: messages.error(request, f"Search terms error: {e}") if filters: queryset = queryset.filter(filters) if not use_distinct: for search_spec, __ in filters.children: if lookup_spawns_duplicates(self.opts, search_spec): use_distinct = True break return queryset, use_distinct class HistoryAdminMixin: def log_addition(self, request, object, change_message=None): history_entry = History.log_addition(request.user, object) if ADDITION in getattr(self, "email_notification_on", []): send_notification_email(request.user, object, ADDITION) return history_entry def log_change(self, request, object, message): """ Add notification on object update. The notification system can be disabled by setting _disable_notification to True on the request. 
""" serialized_data = getattr(request, "_serialized_data", None) history_entry = History.log_change(request.user, object, message, serialized_data) message = history_entry.get_change_message() disable_notification = getattr(request, "_disable_notification", False) if CHANGE in getattr(self, "email_notification_on", []) and not disable_notification: # Expending the base message with details changes_details = getattr(request, "changes_details", {}) message += construct_changes_details_message(changes_details) send_notification_email(request.user, object, CHANGE, message) return history_entry def log_deletion(self, request, object, object_repr): """ Log that an object will be deleted. Note that this method must be called before the deletion. """ return History.log_deletion(request.user, object) def history_view(self, request, object_id, extra_context=None): response = super().history_view(request, object_id, extra_context) context_data = getattr(response, "context_data", None) if context_data: # In case response is a HttpResponseRedirect obj = context_data["object"] history_qs = History.objects if is_dataspace_related(self.model): history_qs = history_qs.filter(object_dataspace__id=obj.dataspace_id) history_entries = ( history_qs.filter( object_id=unquote(object_id), content_type=ContentType.objects.get_for_model(self.model), ) .select_related() .order_by("-action_time") ) obj_has_history_fields = isinstance(obj, HistoryFieldsMixin) if obj_has_history_fields: # Use the history fields from the model for the Addition entry. addition_entry = History( action_time=obj.created_date, user=obj.created_by, change_message="Added.", ) history_entries = history_entries.exclude(action_flag=History.ADDITION) history_entries = list(history_entries) + [addition_entry] response.context_data["action_list"] = history_entries return response class ColoredIconAdminMixin: class Media: js = [ "fontawesomefree/js/all.min.js", ] def colored_icon(self, obj): if obj.icon and obj.color_code: return obj.get_icon_as_html() class DataspacedChangeList(ChangeList): def get_results(self, request): """ Store the result_list ids in the session for the Previous and Next navigation button and "Save and go to next" feature. The session values are then used in the change_view() method of the ModelAdmin. Injects the preserved_filters on each object of the result_list to be used in list_display callable, as the request is not available there. Hierarchy links on ComponentAdmin and OwnerAdmin, as well as the annotation link on the LicenseAdmin requires this. 
This workaround could be removed once if the the following gets solved in Django: https://code.djangoproject.com/ticket/13659 """ super().get_results(request) for obj in self.result_list: obj._preserved_filters = self.preserved_filters self.set_reference_link(request) @property def has_filters_activated(self): return bool(self.get_filters_params()) def get_filters_params(self, params=None): lookup_params = super().get_filters_params(params) if IS_FILTER_LOOKUP_VAR in lookup_params: del lookup_params[IS_FILTER_LOOKUP_VAR] return lookup_params def set_reference_link(self, request): """Add a 'View Reference Data' or 'View My Data 'link in the changelist header.""" do_set_link = all( [ DataspaceFilter in self.model_admin.list_filter, self.model_admin.lookup_allowed(DataspaceFilter.parameter_name, None), not self.is_popup, ] ) if not do_set_link: return reference_dataspace = Dataspace.objects.get_reference() if reference_dataspace and reference_dataspace != request.user.dataspace: dataspace_id = request.GET.get(DataspaceFilter.parameter_name) if dataspace_id and dataspace_id != request.user.dataspace_id: self.my_dataspace_link = True else: params = f"?{DataspaceFilter.parameter_name}={reference_dataspace.id}" self.reference_params = params class DataspacedAdmin( DataspacedFKMixin, ProtectedFieldsMixin, AdvancedSearchAdminMixin, HistoryAdminMixin, admin.ModelAdmin, ): formfield_overrides = { models.DateField: {"widget": AdminDateWidget(attrs={"placeholder": "YYYY-MM-DD"})}, } list_filter = (DataspaceFilter,) readonly_fields = ( "dataspace", "uuid", ) actions = ["copy_to", "compare_with"] actions_to_remove = [] email_notification_on = [ADDITION, CHANGE, DELETION] save_as = True # Display only the current count show_full_result_count = False # Display a warning if any of the identifier_fields has changed identifier_fields_warning = True # Default form, customized form should always extend DataspacedAdminForm form = DataspacedAdminForm # Using extended version of base templates to avoid code duplication change_form_template = "admin/change_form_extended.html" change_list_template = "admin/change_list_extended.html" # Set this to a BaseImporter extension of the Model to enable the import importer_class = None # Set this to a DejacodeMassUpdateForm to enable the mass update action mass_update_form = None # Set this to False to disable the Activity Log feature activity_log = True # Set this to True to enable the Previous and Next buttons in change view navigation_buttons = False preserve_filters = True # Do not display the View on site links by default # Set: view_on_site = DataspacedAdmin.changeform_view_on_site # for the default obj.get_absolute_url() view_on_site = False def __init__(self, model, admin_site): self.form.admin_site = admin_site super().__init__(model, admin_site) def check(self, **kwargs): errors = super().check(**kwargs) has_wrong_form_subclass = all( [ not issubclass(self.form, DataspacedAdminForm), self.model._meta.unique_together != (("dataspace", "uuid"),), ] ) if has_wrong_form_subclass: errors.extend( [checks.Error(f"{self.form} is not a subclass of {DataspacedAdminForm}", obj=self)] ) return errors def changeform_view_on_site(self, obj): return obj.get_absolute_url() @admin.display(description=_("View")) def changelist_view_on_site(self, obj): return format_html('<a href="{}" target="_blank">View</a>', obj.get_absolute_url()) @admin.display(description=_("URN")) def urn_link(self, instance): """Attach the URN link if URN is supported on the Model.""" if instance.pk: return 
instance.urn_link return f"URN will be available once the {instance._meta.verbose_name} is saved." def get_queryset(self, request): qs = super().get_queryset(request) return qs.scope_for_user_in_admin(request.user) def get_changelist(self, request, **kwargs): return DataspacedChangeList def get_list_filter(self, request): """Limit the availability of `MissingInFilter` to superusers.""" list_filter = list(super().get_list_filter(request)) if not request.user.is_superuser and MissingInFilter in list_filter: del list_filter[list_filter.index(MissingInFilter)] # Custom LogEntry-based filters when field not available on the model history_filters = { "created_by": CreatedByListFilter, "last_modified_by": None, "created_date": HistoryCreatedActionTimeListFilter, "last_modified_date": HistoryModifiedActionTimeListFilter, } for field_name, default_filter in history_filters.items(): try: field = self.model._meta.get_field(field_name) except FieldDoesNotExist: if default_filter: list_filter.append(default_filter) continue filtr = field_name if isinstance(field, models.ForeignKey): filtr = (field_name, LimitToDataspaceListFilter) list_filter.append(filtr) return list_filter def get_readonly_fields(self, request, obj=None): readonly_fields = super().get_readonly_fields(request, obj) if issubclass(self.model, HistoryFieldsMixin): readonly_fields += ( "created_date", "created_by", "last_modified_date", "last_modified_by", ) return readonly_fields def change_view(self, request, object_id, form_url="", extra_context=None): """ Render the changelist using the current preserved filters to gather the previous and next id. """ context = extra_context or {} # WARNING: request.GET is important as a condition since we do not want to run this # expensive operation in case no given filter/search/sort is applied. # For example when the admin form is reached directly from the user details view. if self.navigation_buttons and request.method == "GET" and request.GET: fake_request = copy(request) query_dict = fake_request.GET.copy() preserved_filters = query_dict.pop("_changelist_filters", "") if preserved_filters: preserved_filters = force_str(preserved_filters[0]) fake_request.GET = QueryDict(preserved_filters) changelist_view = self.changelist_view(fake_request) # Sanity check, if the _changelist_filters were manually changed for example. if hasattr(changelist_view, "context_data"): # Do not use ".values_list('id', flat=True)" to avoid an extra query ids_list = [str(obj.id) for obj in changelist_view.context_data["cl"].result_list] previous_id, next_id = get_previous_next(ids_list, str(object_id)) context.update( { "previous_id": previous_id, "next_id": next_id, } ) if self.save_as and self.identifier_fields_warning: identifier_fields = self.model.get_identifier_fields() context["identifier_fields"] = identifier_fields return super().change_view(request, object_id, form_url, context) def render_change_form(self, request, context, add=False, change=False, form_url="", obj=None): response = super().render_change_form(request, context, add, change, form_url, obj) # Deactivating the 'Save as' option if the user is editing an Object # belonging to another Dataspace. # We are able to update the context_data at that point as the # TemplateResponse has not been rendered yet. 
if obj and obj.dataspace != request.user.dataspace: response.context_data["save_as"] = False return response @staticmethod def get_selected_ids_from_request(request, queryset): select_across = request.POST.get("select_across", 0) if int(select_across): # This is the "Select all" case; we are using the full queryset object_ids = list(queryset.values_list("id", flat=True)) else: object_ids = request.POST.getlist(ACTION_CHECKBOX_NAME) # Converting the ids list into a comma-separated string, to be used # in the GET parameters return ",".join(str(id_) for id_ in object_ids) def base_action_with_redirect(self, request, queryset, viewname): ids = self.get_selected_ids_from_request(request, queryset) opts = self.model._meta preserved_filters = self.get_preserved_filters(request) view_url = reverse(f"admin:{opts.app_label}_{opts.model_name}_{viewname}") url_with_params = "{}?{}".format(view_url, urlencode({"ids": ids})) redirect_url = add_preserved_filters( {"preserved_filters": preserved_filters, "opts": opts}, url_with_params ) return redirect(redirect_url) @admin.display(description=_("Copy the selected objects")) def copy_to(self, request, queryset): """Copy the selected objects to another Dataspace.""" return self.base_action_with_redirect(request, queryset, "copy") @admin.display(description=_("Compare the selected object")) def compare_with(self, request, queryset): """Compare one selected object with a matching object in another Dataspace.""" return self.base_action_with_redirect(request, queryset, "compare") @admin.display(description=_("Check for updates in reference data")) def check_updates_in_reference(self, request, queryset): values = queryset.values_list("uuid", "last_modified_date") orm_lookups = [ models.Q(**{"uuid": uuid, "last_modified_date__gt": last_modified_date}) for uuid, last_modified_date in values ] return self.base_check_in_reference_action(request, self.model, orm_lookups) @admin.display(description=_("Check for newer versions in reference data")) def check_newer_version_in_reference(self, request, queryset): values = queryset.values_list("name", "version") orm_lookups = [ models.Q(**{"name": name, "version__gt": version}) for name, version in values ] return self.base_check_in_reference_action(request, self.model, orm_lookups) @staticmethod def base_check_in_reference_action(request, model_class, orm_lookups): reference_dataspace = Dataspace.objects.get_reference() if not reference_dataspace or not orm_lookups: return updated_qs = model_class.objects.scope(reference_dataspace).filter( reduce(operator.or_, orm_lookups) ) params = {DataspaceFilter.parameter_name: reference_dataspace.pk} changelist_href = queryset_to_changelist_href(updated_qs, params) if changelist_href: return redirect(changelist_href) messages.warning(request, "No updates available in the reference dataspace.") @staticmethod def get_changes_details(form): """ Introspect a given form to collect the changes details. 
Original values are collected on the DB instance (pre-save value) and new values are collected on the form (post-save value) """ if not form.instance.pk: return {} model_class = form.instance.__class__ original_instance = model_class.objects.get(pk=form.instance.pk) changes_details = [] # Using form.changed_data to only iterate on updated fields for field in form.changed_data: original_value = getattr(original_instance, field) new_value = getattr(form.instance, field) changes_details.append((field, original_value, new_value)) return {form.instance: changes_details} def save_model(self, request, obj, form, change): """Set the created_by and last_modified_by fields at save time.""" # This has no impact on save() if the model does not declare those fields. obj.last_modified_by = request.user if not change: obj.created_by = request.user # Injecting the results in the request for future use in the # log_change() method. This content will be used to add the changes # details into the notification message. # Using an OrderedDict to keep the main instance details first. # The related objects changes (inlines) are gathered in # self.save_formset() request.changes_details = OrderedDict() if change and CHANGE in self.email_notification_on: request.changes_details.update(self.get_changes_details(form)) super().save_model(request, obj, form, change) def save_formset(self, request, form, formset, change): """ Set the Dataspace on the Inline instance before it's saved. Using the Dataspace of the Model instance of this ModelAdmin. Also craft the change details of the Inlines. """ for f in formset.forms: # Skip if nothing has changed in the current inline form if not f.changed_data: continue # Set the Dataspace on the Inline instance in case of addition of # the current inline. # The Dataspace is taken from the main form instance. if not f.instance.dataspace_id: f.instance.dataspace = form.instance.dataspace # Only in case of a 'change' on the main instance if change and CHANGE in self.email_notification_on: # As the `change` param is only about the main instance, we use # the pk of the inline instance to make sure we are in a # MODIFICATION case. # # If the pk of the inline instance is None, this is an ADDITION, # so skip the details creation. Also, if DELETION_FIELD_NAME is # in changed_data, we are in an inline deletion case, skipping # too. if f.instance.pk and DELETION_FIELD_NAME not in f.changed_data: # request.changes_details is created in self.save_model() request.changes_details.update(self.get_changes_details(f)) super().save_formset(request, form, formset, change) def delete_model(self, request, obj): # We are using this rather than self.log_deletion because it's not called. # Here, History.log_deletion is called for each object in the bulk. History.log_deletion(request.user, obj) super().delete_model(request, obj) if DELETION in self.email_notification_on: send_notification_email(request.user, obj, DELETION) def delete_queryset(self, request, queryset): """ Add the email notification on bulk deletion through the default Django 'delete_selected' action. 
""" send_notification_email_on_queryset(request.user, queryset, DELETION) super().delete_queryset(request, queryset) def get_urls(self): info = self.model._meta.app_label, self.model._meta.model_name urls = [] if self.activity_log: urls += [ path( "activity_log/", self.admin_site.admin_view(ActivityLog.as_view(model=self.model)), name="{}_{}_activity_log".format(*info), ) ] actions = getattr(self, "actions", []) actions_to_remove = getattr(self, "actions_to_remove", []) if "copy_to" in actions and "copy_to" not in actions_to_remove: urls += [ path( "copy/", self.admin_site.admin_view(object_copy_view), name="{}_{}_copy".format(*info), ), ] if "compare_with" in actions and "compare_with" not in actions_to_remove: urls += [ path( "compare/", self.admin_site.admin_view(object_compare_view), name="{}_{}_compare".format(*info), ), ] if self.importer_class: urls += [ path( "import/", self.admin_site.admin_view(import_view), {"importer_class": self.importer_class}, name="{}_{}_import".format(*info), ), ] return urls + super().get_urls() def get_form(self, request, obj=None, **kwargs): """ Set the `obj` instance on the `request` for future processing. Set the serialized_data of the object on the `request` to be used in the `log_change` method. Set the `request` on the `form_class`. """ if obj: request._object = obj request._serialized_data = obj.as_json() form_class = super().get_form(request, obj, **kwargs) form_class.request = request return form_class def get_fieldsets(self, request, obj=None): """Exclude form fields from the ADMIN_FORMS_CONFIGURATION settings.""" fieldsets = super().get_fieldsets(request, obj) forms_config = settings.ADMIN_FORMS_CONFIGURATION if not forms_config: return fieldsets model_config = forms_config.get(self.model._meta.model_name, {}) exclude = model_config.get("exclude", []) if not exclude: return fieldsets fieldsets_with_exclude = [] for label, entry in fieldsets: fields = entry.get("fields") if fields: entry["fields"] = [field for field in fields if field not in exclude] fieldsets_with_exclude.append((label, entry)) return fieldsets_with_exclude def get_inline_instances(self, request, obj=None): """Injects the ``request`` in each inline form to be used in validation.""" base_instances = super().get_inline_instances(request, obj) instances = [] request._object = obj for inline in base_instances: inline.form.request = request instances.append(inline) return instances def get_actions(self, request): """Limit the available actions based on who you are and what you are looking at.""" if IS_POPUP_VAR in request.GET: return OrderedDict() actions = super().get_actions(request) is_user_dataspace = DataspaceFilter.parameter_name not in request.GET can_mass_update = all( [ self.mass_update_form, has_permission(self.model, request.user, "change"), is_user_dataspace or request.user.dataspace.is_reference, ] ) if can_mass_update: actions["mass_update"] = (mass_update_action, "mass_update", "Mass update") if not has_permission(self.model, request.user, "add") and "copy_to" in actions: del actions["copy_to"] if not request.user.dataspace.is_reference: if is_user_dataspace: # The user is looking at his own Dataspace if "copy_to" in actions: del actions["copy_to"] if "compare_with" in actions: del actions["compare_with"] else: # The user is looking at another Dataspace if "delete_selected" in actions: del actions["delete_selected"] if request.user.dataspace.is_reference or not is_user_dataspace: if "check_updates_in_reference" in actions: del actions["check_updates_in_reference"] if 
"check_newer_version_in_reference" in actions: del actions["check_newer_version_in_reference"] for action in self.actions_to_remove: if action in actions: del actions[action] return actions @admin.display(description=_("Copy to my Dataspace")) def copy_link(self, obj): return format_html( '<strong><a href="{}&{}=1">{}</a></strong>', obj.get_copy_url(), IS_POPUP_VAR, _("Copy to my Dataspace"), ) @staticmethod def hide_display_links(request): return all( [ DataspaceFilter.parameter_name in request.GET, request.GET.get(DataspaceFilter.parameter_name) != str(request.user.dataspace_id), ] ) def get_list_display(self, request): """ Remove the view_on_site and hierarchy links in popup mode. Also insert the copy link when looking at another dataspace. """ list_display = super().get_list_display(request) if IS_POPUP_VAR in request.GET: list_display = list(list_display) if "changelist_view_on_site" in list_display: list_display.remove("changelist_view_on_site") if get_hierarchy_link in list_display: list_display.remove(get_hierarchy_link) if self.hide_display_links(request): list_display = list(list_display) if "copy_to" not in self.actions_to_remove: list_display.insert(0, "copy_link") if get_hierarchy_link in list_display: list_display.remove(get_hierarchy_link) return list_display def get_list_display_links(self, request, list_display): """Remove all the display_links when looking at another dataspace.""" if not self.hide_display_links(request): return super().get_list_display_links(request, list_display) def response_change(self, request, obj): """Add the logic for the "Save and go to next" feature.""" next_id = request.POST.get("next_id") if "_next" in request.POST and next_id: opts = self.model._meta preserved_filters = self.get_preserved_filters(request) msg_dict = { "name": str(opts.verbose_name), "obj": str(obj), } msg = 'The {name} "{obj}" was changed successfully.'.format(**msg_dict) self.message_user(request, msg, messages.SUCCESS) viewname = f"admin:{opts.app_label}_{opts.model_name}_change" next_url = reverse(viewname, args=[next_id], current_app=self.admin_site.name) redirect_url = add_preserved_filters( {"preserved_filters": preserved_filters, "opts": opts}, next_url ) return redirect(redirect_url) return super().response_change(request, obj) def lookup_allowed(self, lookup, value): if lookup in [EXTERNAL_SOURCE_LOOKUP]: return True return super().lookup_allowed(lookup, value) @staticmethod def _limited_permission(request, obj, has_perm): # Model permission if not has_perm: return False # Object instance permission if obj and obj.dataspace_id != request.user.dataspace_id: return request.user.dataspace.is_reference return True def has_add_permission(self, request): has_perm = super().has_add_permission(request) # Do not display the "Add" link in filter lookup popup mode if IS_FILTER_LOOKUP_VAR in request.GET: return False return has_perm def has_change_permission(self, request, obj=None): has_perm = super().has_change_permission(request, obj) return self._limited_permission(request, obj, has_perm) def has_delete_permission(self, request, obj=None): has_perm = super().has_delete_permission(request, obj) return self._limited_permission(request, obj, has_perm) def has_view_permission(self, request, obj=None): has_perm = super().has_view_permission(request, obj) return self._limited_permission(request, obj, has_perm) def has_importer(self): """Return True if the importer_class has been set.""" if self.importer_class: return True def has_activity_log(self): """Return True if the 
activity_log has been set.""" if self.activity_log: return True class HiddenValueWidget(forms.TextInput): """Render a hidden value in the UI.""" HIDDEN_VALUE = "*******" def render(self, name, value, attrs=None, renderer=None): value = self.HIDDEN_VALUE if value else None return super().render(name, value, attrs, renderer) class DataspaceConfigurationForm(forms.ModelForm): """ Configure Dataspace settings. This form includes fields for various API keys, with sensitive values hidden in the UI using the HiddenValueWidget. """ hidden_value_fields = [ "scancodeio_api_key", "vulnerablecode_api_key", "purldb_api_key", ] def __init__(self, *args, **kwargs): """Initialize the form and set HiddenValueWidget for specified fields.""" super().__init__(*args, **kwargs) for field_name in self.hidden_value_fields: self.fields[field_name].widget = HiddenValueWidget() def clean(self): """Clean the form data, excluding hidden values from cleaned_data.""" for field_name in self.hidden_value_fields: value = self.cleaned_data.get(field_name) if value == HiddenValueWidget.HIDDEN_VALUE: del self.cleaned_data[field_name] class DataspaceConfigurationInline(DataspacedFKMixin, admin.StackedInline): model = DataspaceConfiguration form = DataspaceConfigurationForm verbose_name_plural = _("Configuration") verbose_name = _("Dataspace configuration") fields = [ "homepage_layout", "scancodeio_url", "scancodeio_api_key", "vulnerablecode_url", "vulnerablecode_api_key", "purldb_url", "purldb_api_key", ] can_delete = False @admin.register(Dataspace, site=dejacode_site) class DataspaceAdmin( ReferenceOnlyPermissions, HistoryAdminMixin, admin.ModelAdmin, ): short_description = ( "A Dataspace is an independent, exclusive set of DejaCode data, " "which can be either nexB reference data or installation-specific data." ) long_description = ( "Each DJE application User is associated with exactly one Dataspace, " "and the data owned by that Dataspace is presented to the user when " "accessing the application. 
" "An installation of DejaCode typically contains the following Dataspaces:" "nexB: Reference reference data from nexB" "{{mySite}}: Production data for a specific DejaCode installation" "{{sandbox}}: Data for testing, training, or staging activities" ) list_display = ( "name", "full_name", AsURL("homepage_url", short_description="Homepage URL"), AsURL("contact_info", short_description="Contact information"), ) fieldsets = ( ( "", { "fields": ( "name", "homepage_url", "notes", "home_page_announcements", "logo_url", ) }, ), ( "Attribution Package Information", { "fields": ( "full_name", "address", "contact_info", "open_source_information_url", "open_source_download_url", ) }, ), ( "User Interface Settings", { "fields": ( "show_license_profile_in_license_list_view", "show_license_type_in_license_list_view", "show_spdx_short_identifier_in_license_list_view", "show_usage_policy_in_user_views", "show_type_in_component_list_view", "hide_empty_fields_in_component_details_view", ) }, ), ( "Application Process Settings", { "fields": ( "set_usage_policy_on_new_component_from_licenses", "enable_package_scanning", "update_packages_from_scan", "enable_purldb_access", "enable_vulnerablecodedb_access", ) }, ), ) search_fields = ("name",) inlines = [DataspaceConfigurationInline] form = DataspaceAdminForm change_form_template = "admin/dje/dataspace/change_form.html" change_list_template = "admin/change_list_extended.html" def has_change_permission(self, request, obj=None): """ Bypass the ReferenceOnlyPermissions to allow regular Dataspace admins, with the right permission, to edit their own Dataspace. """ return super(admin.ModelAdmin, self).has_change_permission(request, obj) def get_readonly_fields(self, request, obj=None): """Make Dataspace.name field readonly on edit except for reference Dataspace superusers.""" readonly_fields = super().get_readonly_fields(request, obj) user = request.user if obj and not (user.dataspace.is_reference and user.is_superuser): readonly_fields += ("name",) return readonly_fields def get_urls(self): info = self.model._meta.app_label, self.model._meta.model_name urls = [ path( "<pk>/clonedataset/", self.admin_site.admin_view(clone_dataset_view), name="{}_{}_clonedataset".format(*info), ), path( "<pk>/tab_permissions/", self.admin_site.admin_view(manage_tab_permissions_view), name="{}_{}_tab_permissions".format(*info), ), path( "<pk>/copy_defaults/", self.admin_site.admin_view(manage_copy_defaults_view), name="{}_{}_copy_defaults".format(*info), ), ] return urls + super().get_urls() def get_queryset(self, request): """ Limit the QuerySet to the current user Dataspace. + the Reference one. If the user Dataspace is the Reference then show all. 
""" qs = super().get_queryset(request) if not request.user.dataspace.is_reference: qs = qs.filter(id=request.user.dataspace_id) return qs def get_actions(self, request): """Remove the bulk delete action, it does not make sense for Dataspace.""" actions = super().get_actions(request) if "delete_selected" in actions: del actions["delete_selected"] return actions def changeform_view(self, request, object_id=None, form_url="", extra_context=None): extra_context = extra_context or {} extra_context["template_dataspace"] = settings.TEMPLATE_DATASPACE return super().changeform_view(request, object_id, form_url, extra_context) class ChildRelationshipInline(DataspacedFKMixin, admin.TabularInline): fk_name = "parent" extra = 0 classes = ("grp-collapse grp-open",) raw_id_fields = ("child",) autocomplete_lookup_fields = {"fk": ["child"]} verbose_name = _("Child") class ExternalReferenceInline(DataspacedFKMixin, GenericTabularInline): model = ExternalReference extra = 0 classes = ("grp-collapse grp-open",) @admin.register(ExternalSource, site=dejacode_site) class ExternalSourceAdmin(DataspacedAdmin): def references(self, obj): """ Return links to the content_object changelist of ExternalReference instances, for the given ExternalSource instance, grouped per ContentType. """ changelist_links = [] queryset = obj.externalreference_set grouped = group_by(queryset, "content_type", count_on="object_id", distinct=True) for value in grouped: model_class = ContentType.objects.get(id=value["content_type"]).model_class() opts = model_class._meta url = reverse(f"admin:{opts.app_label}_{opts.model_name}_changelist") params = {EXTERNAL_SOURCE_LOOKUP: obj.id} href = f"{url}?{urlencode(params)}" changelist_link = format_html( CHANGELIST_LINK_TEMPLATE, href, value["count"], opts.verbose_name_plural ) changelist_links.append([changelist_link]) html_list = "<ul>{}</ul>".format(format_html_join("", "<li>{}</li>", changelist_links))
return class_wrap(html_list, "width200")
26
2023-12-07 16:57:42+00:00
24k
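The dejacode row above revolves around DataspacedAdmin's pattern of redirecting an admin action to a custom view while carrying the changelist filters along, as seen in base_action_with_redirect. The following is a minimal, hedged sketch of that pattern using only stock Django admin APIs; the ExampleAdmin class and the "admin:..._copy" route name are hypothetical stand-ins, not names taken from the repo.

from django.contrib import admin
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.shortcuts import redirect
from django.urls import reverse
from django.utils.http import urlencode

class ExampleAdmin(admin.ModelAdmin):  # hypothetical admin for illustration
    actions = ["copy_to"]

    def copy_to(self, request, queryset):
        opts = self.model._meta
        # Collect the selected primary keys as one comma-separated GET parameter.
        ids = ",".join(str(pk) for pk in queryset.values_list("pk", flat=True))
        view_url = reverse(f"admin:{opts.app_label}_{opts.model_name}_copy")
        url_with_params = f"{view_url}?{urlencode({'ids': ids})}"
        # Re-attach the current changelist filters so the custom view can send
        # the user back to the same filtered list afterwards.
        preserved_filters = self.get_preserved_filters(request)
        redirect_url = add_preserved_filters(
            {"preserved_filters": preserved_filters, "opts": opts}, url_with_params
        )
        return redirect(redirect_url)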
wusize/CLIM
src/open_clip/model.py
[ { "identifier": "HFTextEncoder", "path": "src/open_clip/hf_model.py", "snippet": "class HFTextEncoder(nn.Module):\n \"\"\"HuggingFace model adapter\"\"\"\n output_tokens: torch.jit.Final[bool]\n\n def __init__(\n self,\n model_name_or_path: str,\n output_dim: in...
from dataclasses import dataclass from typing import Optional, Tuple, Union from torch import nn from torch.utils.checkpoint import checkpoint from .hf_model import HFTextEncoder from .modified_resnet import ModifiedResNet from .timm_model import TimmModel from .transformer import LayerNormFp32, LayerNorm, QuickGELU, Attention, VisionTransformer, TextTransformer from .utils import to_2tuple import logging import math import numpy as np import torch import torch.nn.functional as F
15,880
""" CLIP Model Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI. """ @dataclass class CLIPVisionCfg: layers: Union[Tuple[int, int, int, int], int] = 12 width: int = 768 head_width: int = 64 mlp_ratio: float = 4.0 patch_size: int = 16 image_size: Union[Tuple[int, int], int] = 224 ls_init_value: Optional[float] = None # layer scale initial value patch_dropout: float = 0. # what fraction of patches to dropout during training (0 would mean disabled and no patches dropped) - 0.5 to 0.75 recommended in the paper for optimal results input_patchnorm: bool = False # whether to use dual patchnorm - would only apply the input layernorm on each patch, as post-layernorm already exist in original clip vit design global_average_pool: bool = False # whether to global average pool the last embedding layer, instead of using CLS token (https://arxiv.org/abs/2205.01580) attentional_pool: bool = False # whether to use attentional pooler in the last embedding layer n_queries: int = 256 # n_queries for attentional pooler attn_pooler_heads: int = 8 # n heads for attentional_pooling timm_model_name: str = None # a valid model name overrides layers, width, patch_size timm_model_pretrained: bool = False # use (imagenet) pretrained weights for named model timm_pool: str = 'avg' # feature pooling for timm model ('abs_attn', 'rot_attn', 'avg', '') timm_proj: str = 'linear' # linear projection for timm model output ('linear', 'mlp', '') timm_proj_bias: bool = False # enable bias final projection timm_drop: float = 0. # head dropout timm_drop_path: Optional[float] = None # backbone stochastic depth output_tokens: bool = False freeze_output = True freeze_all_bns = True @dataclass class CLIPTextCfg: context_length: int = 77 vocab_size: int = 49408 width: int = 512 heads: int = 8 layers: int = 12 ls_init_value: Optional[float] = None # layer scale initial value hf_model_name: str = None hf_tokenizer_name: str = None hf_model_pretrained: bool = True proj: str = 'mlp' pooler_type: str = 'mean_pooler' embed_cls: bool = False pad_id: int = 0 output_tokens: bool = False def get_cast_dtype(precision: str): cast_dtype = None if precision == 'bf16': cast_dtype = torch.bfloat16 elif precision == 'fp16': cast_dtype = torch.float16 return cast_dtype def _build_vision_tower( embed_dim: int, vision_cfg: CLIPVisionCfg, quick_gelu: bool = False, cast_dtype: Optional[torch.dtype] = None ): if isinstance(vision_cfg, dict): vision_cfg = CLIPVisionCfg(**vision_cfg) # OpenAI models are pretrained w/ QuickGELU but native nn.GELU is both faster and more # memory efficient in recent PyTorch releases (>= 1.10). # NOTE: timm models always use native GELU regardless of quick_gelu flag. 
act_layer = QuickGELU if quick_gelu else nn.GELU if vision_cfg.timm_model_name: visual = TimmModel( vision_cfg.timm_model_name, pretrained=vision_cfg.timm_model_pretrained, pool=vision_cfg.timm_pool, proj=vision_cfg.timm_proj, proj_bias=vision_cfg.timm_proj_bias, drop=vision_cfg.timm_drop, drop_path=vision_cfg.timm_drop_path, patch_drop=vision_cfg.patch_dropout if vision_cfg.patch_dropout > 0 else None, embed_dim=embed_dim, image_size=vision_cfg.image_size, ) act_layer = nn.GELU # so that text transformer doesn't use QuickGELU w/ timm models elif isinstance(vision_cfg.layers, (tuple, list)): vision_heads = vision_cfg.width * 32 // vision_cfg.head_width visual = ModifiedResNet( layers=vision_cfg.layers, output_dim=embed_dim, heads=vision_heads, image_size=vision_cfg.image_size, width=vision_cfg.width, freeze_output=vision_cfg.freeze_output, freeze_all_bns=vision_cfg.freeze_all_bns ) else: vision_heads = vision_cfg.width // vision_cfg.head_width
""" CLIP Model Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI. """ @dataclass class CLIPVisionCfg: layers: Union[Tuple[int, int, int, int], int] = 12 width: int = 768 head_width: int = 64 mlp_ratio: float = 4.0 patch_size: int = 16 image_size: Union[Tuple[int, int], int] = 224 ls_init_value: Optional[float] = None # layer scale initial value patch_dropout: float = 0. # what fraction of patches to dropout during training (0 would mean disabled and no patches dropped) - 0.5 to 0.75 recommended in the paper for optimal results input_patchnorm: bool = False # whether to use dual patchnorm - would only apply the input layernorm on each patch, as post-layernorm already exist in original clip vit design global_average_pool: bool = False # whether to global average pool the last embedding layer, instead of using CLS token (https://arxiv.org/abs/2205.01580) attentional_pool: bool = False # whether to use attentional pooler in the last embedding layer n_queries: int = 256 # n_queries for attentional pooler attn_pooler_heads: int = 8 # n heads for attentional_pooling timm_model_name: str = None # a valid model name overrides layers, width, patch_size timm_model_pretrained: bool = False # use (imagenet) pretrained weights for named model timm_pool: str = 'avg' # feature pooling for timm model ('abs_attn', 'rot_attn', 'avg', '') timm_proj: str = 'linear' # linear projection for timm model output ('linear', 'mlp', '') timm_proj_bias: bool = False # enable bias final projection timm_drop: float = 0. # head dropout timm_drop_path: Optional[float] = None # backbone stochastic depth output_tokens: bool = False freeze_output = True freeze_all_bns = True @dataclass class CLIPTextCfg: context_length: int = 77 vocab_size: int = 49408 width: int = 512 heads: int = 8 layers: int = 12 ls_init_value: Optional[float] = None # layer scale initial value hf_model_name: str = None hf_tokenizer_name: str = None hf_model_pretrained: bool = True proj: str = 'mlp' pooler_type: str = 'mean_pooler' embed_cls: bool = False pad_id: int = 0 output_tokens: bool = False def get_cast_dtype(precision: str): cast_dtype = None if precision == 'bf16': cast_dtype = torch.bfloat16 elif precision == 'fp16': cast_dtype = torch.float16 return cast_dtype def _build_vision_tower( embed_dim: int, vision_cfg: CLIPVisionCfg, quick_gelu: bool = False, cast_dtype: Optional[torch.dtype] = None ): if isinstance(vision_cfg, dict): vision_cfg = CLIPVisionCfg(**vision_cfg) # OpenAI models are pretrained w/ QuickGELU but native nn.GELU is both faster and more # memory efficient in recent PyTorch releases (>= 1.10). # NOTE: timm models always use native GELU regardless of quick_gelu flag. 
act_layer = QuickGELU if quick_gelu else nn.GELU if vision_cfg.timm_model_name: visual = TimmModel( vision_cfg.timm_model_name, pretrained=vision_cfg.timm_model_pretrained, pool=vision_cfg.timm_pool, proj=vision_cfg.timm_proj, proj_bias=vision_cfg.timm_proj_bias, drop=vision_cfg.timm_drop, drop_path=vision_cfg.timm_drop_path, patch_drop=vision_cfg.patch_dropout if vision_cfg.patch_dropout > 0 else None, embed_dim=embed_dim, image_size=vision_cfg.image_size, ) act_layer = nn.GELU # so that text transformer doesn't use QuickGELU w/ timm models elif isinstance(vision_cfg.layers, (tuple, list)): vision_heads = vision_cfg.width * 32 // vision_cfg.head_width visual = ModifiedResNet( layers=vision_cfg.layers, output_dim=embed_dim, heads=vision_heads, image_size=vision_cfg.image_size, width=vision_cfg.width, freeze_output=vision_cfg.freeze_output, freeze_all_bns=vision_cfg.freeze_all_bns ) else: vision_heads = vision_cfg.width // vision_cfg.head_width
norm_layer = LayerNormFp32 if cast_dtype in (torch.float16, torch.bfloat16) else LayerNorm
3
2023-12-09 05:43:08+00:00
24k
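In the CLIM row above, the detail worth isolating is how _build_vision_tower derives its activation and normalization layers from the precision string and the quick_gelu flag. Below is a minimal sketch of just that selection logic; it assumes it lives alongside the repo's src/open_clip/transformer.py (for the relative import), and the pick_layers helper name is an invention for illustration.

import torch
from torch import nn
from .transformer import LayerNorm, LayerNormFp32, QuickGELU  # as imported by model.py

def pick_layers(precision: str, quick_gelu: bool):
    # Mirror get_cast_dtype above: only bf16/fp16 map to a cast dtype.
    cast_dtype = {"bf16": torch.bfloat16, "fp16": torch.float16}.get(precision)
    # OpenAI checkpoints were trained with QuickGELU; native nn.GELU is the
    # faster, more memory-efficient default for models trained from scratch.
    act_layer = QuickGELU if quick_gelu else nn.GELU
    # In half precision, prefer the LayerNorm variant that keeps its
    # computation in fp32 for numerical stability.
    norm_layer = LayerNormFp32 if cast_dtype in (torch.float16, torch.bfloat16) else LayerNorm
    return act_layer, norm_layer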
LkPrtctrd/BSL-V53
Heart/Logic/LogicLaserMessageFactory.py
[ { "identifier": "ClientHelloMessage", "path": "Heart/Packets/Client/Authentification/ClientHelloMessage.py", "snippet": "class ClientHelloMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields)...
from Heart.Packets.Client.Authentification.ClientHelloMessage import ClientHelloMessage from Heart.Packets.Client.Authentification.LoginMessage import LoginMessage from Heart.Packets.Client.Battle.AskForBattleEndMessage import AskForBattleEndMessage from Heart.Packets.Client.Home.ChangeAvatarNameMessage import ChangeAvatarNameMessage from Heart.Packets.Client.Home.EndClientTurnMessage import EndClientTurnMessage from Heart.Packets.Client.Home.GoHomeFromOfflinePractiseMessage import GoHomeFromOfflinePractiseMessage from Heart.Packets.Client.Home.GoHomeMessage import GoHomeMessage from Heart.Packets.Client.Home.GetPlayerProfileMessage import GetPlayerProfileMessage from Heart.Packets.Client.Home.AskForAllianceDataMessage import AskForAllianceDataMessage from Heart.Packets.Client.Socket.KeepAliveMessage import KeepAliveMessage from Heart.Packets.Server.Authentification.LoginFailedMessage import LoginFailedMessage from Heart.Packets.Server.Authentification.LoginOkMessage import LoginOkMessage from Heart.Packets.Server.Authentification.OutOfSyncMessage import OutOfSyncMessage from Heart.Packets.Server.Authentification.ServerHelloMessage import ServerHelloMessage from Heart.Packets.Server.Battle.BattleEndMessage import BattleEndMessage from Heart.Packets.Server.Home.AvailableServerCommandMessage import AvailableServerCommandMessage from Heart.Packets.Server.Home.LobbyInfoMessage import LobbyInfoMessage from Heart.Packets.Server.Home.OwnHomeDataMessage import OwnHomeDataMessage from Heart.Packets.Server.Socket.KeepAliveServerMessage import KeepAliveServerMessage from Heart.Packets.Server.Home.PlayerProfileMessage import PlayerProfileMessage from Heart.Packets.Server.Home.MyAllianceMessage import MyAllianceMessage from Heart.Packets.Server.Home.AllianceDataMessage import AllianceDataMessage
16,878
14362: 'TeamSetEventMessage', 14363: 'TeamSetLocationMessage', 14364: 'TeamReportChatMessage', 14365: 'TeamInviteMessage', 14366: 'PlayerStatusMessage', 14367: 'TeamClearInviteMessage', 14368: 'TeamInviteResponseMessage', 14369: 'TeamPremadeChatMessage', 14370: 'TeamAllianceMemberInviteMessage', 14371: 'TeamJoinOrCreateGameRoomMessage', 14372: 'TeamToggleSettingsMessage', 14373: 'TeamBotSlotDisableMessage', 14403: 'GetLeaderboardMessage', 14405: 'AskForAvatarStreamMessage', 14406: 'AskForBattleReplayStreamMessage', 14418: 'RemoveAvatarStreamEntryMessage', 14469: 'AlliancePremadeChatMessage', 14479: 'TeamInvitationResponseMessage', 14600: 'AvatarNameCheckRequestMessage', 14700: 'ListBrawlTvChannelsMessage', 14701: 'TuneBrawlTvChannelMessage', 14715: 'SendGlobalChatLineMessage', 14777: 'SetInvitesBlockedMessage', 14778: 'SetTeamChatMutedMessage', 14867: 'SetRegionMessage', 14880: 'TeamRequestJoinCancelMessage', 14881: 'TeamRequestJoinMessage', 14882: 'TeamRequestJoinApproveMessage', 15081: GetPlayerProfileMessage, #v50 15793: 'GetTokenFriendMessage', 16000: 'LogicDeviceLinkCodeRequestMessage', 16001: 'LogicDeviceLinkMenuClosedMessage', 16002: 'LogicDeviceLinkEnterCodeMessage', 16003: 'LogicDeviceLinkConfirmYesMessage', 16939: 'AskApiTokenMessage', 17000: 'LogicAccountTransferCodeRequestMessage', 17190: 'JoinAllianceUsingTokenMessage', 17337: 'UnbotifyReportMessage', 17338: 'AdjustPackageMessage', 17750: GoHomeFromOfflinePractiseMessage, #v50 18686: 'SetSupportedCreatorMessage', 19001: 'LatencyTestResultMessage', 19002: 'UdpLatencyTestRequestMessage', 19003: 'TriggerStartLatencyTestMessage', 19004: 'RequestLatencyTestStatusMessage', 20000: 'SetEncryptionMessage', 20100: ServerHelloMessage, 20101: 'CreateAccountOkMessage', 20103: LoginFailedMessage, 20104: LoginOkMessage, 20105: 'FriendListMessage', 20106: 'FriendListUpdateMessage', 20107: 'AddableFriendsMessage', 20108: KeepAliveServerMessage, 20109: 'FriendOnlineStatusMessage', 20110: 'FriendLoggedInMessage', 20111: 'FriendLoggedOutMessage', 20112: 'AddFriendFailedMessage', 20117: 'ReportUserStatusMessage', 20118: 'ChatAccountBanStatusMessage', 20121: 'BillingRequestFailedMessage', 20132: 'UnlockAccountOkMessage', 20133: 'UnlockAccountFailedMessage', 20151: 'AppleBillingProcessedByServerMessage', 20152: 'GoogleBillingProcessedByServerMessage', 20153: 'TencentBillingProcessedByServerMessage', 20154: 'CafeBazaarBillingProcessedByServerMessage', 20156: 'KunlunBillingProcessedByServerMessage', 20161: 'ShutdownStartedMessage', 20171: 'PersonalBreakStartedMessage', 20173: 'YoozooBillingProcessedByServerMessage', 20199: 'FriendSuggestionsMessage', 20205: 'AvatarNameChangeFailedMessage', 20206: 'AvatarOnlineStatusUpdated', 20207: 'AllianceOnlineStatusUpdatedMessage', 20300: 'AvatarNameCheckResponseMessage', 20402: 'CreateGameFailedMessage', 20405: 'MatchMakingStatusMessage', 20406: 'MatchMakingCancelledMessage', 20501: 'AcceptFriendFailedMessage', 20523: 'YoozooOrderAvailableMessage', 20545: 'YoozooOrderDeliveryFailedMessage', 20559: 'StartLoadingMessage', 20801: 'NotificationMessage', 20802: 'OpponentRejoinsMatchNotificationMessage', 20931: 'AntiAddictionDataUpdatedMessage', 22089: 'GetTokenFriendResultMessage', 22100: 'CreatePlayerMapResponseMessage', 22101: 'DeletePlayerMapResponseMessage', 22102: 'PlayerMapsMessage', 22103: 'UpdatePlayerMapResponseMessage', 22104: 'SubmitPlayerMapResponseMessage', 22105: 'PublishPlayerMapResponseMessage', 22106: 'ChangePlayerMapNameMResponseMessage', 22107: 'PlayerMapInfoUpdatedMessage', 22109: 
'DebugPlayerMapReviewResultOverrideSetMessage', 22111: 'PlayerMapGreenlightedMessage', 22125: 'ReportPlayerMapResponseMessage', 22150: 'RankedMatchStartMessage', 22151: 'RankedMatchBanStartedMessage', 22152: 'RankedMatchBanHeroResponseMessage', 22153: 'RankedMatchBanEndedMessage', 22154: 'RankedMatchPickStartedMessage', 22155: 'RankedMatchPickHeroFailedMessage', 22156: 'RankedMatchHeroPickedMessage', 22157: 'RankedMatchHeroDataUpdatedMessage', 22158: 'RankedMatchFinalPreparationStartedMessage', 22159: 'RankedMatchTerminatedMessage', 22202: 'MapPreviewMessage', 22377: 'GoogleServiceAccountBoundMessage', 22687: 'GamecenterAccountAlreadyBoundMessage', 22957: 'PvpMatchmakeNotificationMessage', 23067: 'SCIDLogoutAllDevicesResultMessage', 23302: 'GetAllianceInviteTokenResultMessage', 23456: BattleEndMessage, 23457: LobbyInfoMessage, 23458: 'BattleLogMessage', 23459: 'BattleLogReplayAvailableMessage', 23494: 'GoogleServiceAccountAlreadyBoundMessage', 23774: 'PlayerJWTokenMessage',
class LogicLaserMessageFactory: messagesList = { 10055: 'AskPlayerJWTokenMessage', 10099: 'ClientCryptoErrorMessage', 10100: ClientHelloMessage, 10101: LoginMessage, 10102: 'LoginUsingSessionMessage', 10103: 'CreateAccountMessage', 10107: 'ClientCapabilitiesMessage', 10108: KeepAliveMessage, 10109: 'UdpCheckConnectionMessage', 10110: 'AnalyticEventMessage', 10111: 'AccountIdentifiersMessage', 10112: 'AuthenticationCheckMessage', 10113: 'SetDeviceTokenMessage', 10116: 'ResetAccountMessage', 10117: 'ReportUserMessage', 10118: 'AccountSwitchedMessage', 10119: 'ReportAllianceStreamMessage', 10121: 'UnlockAccountMessage', 10150: 'AppleBillingRequestMessage', 10151: 'GoogleBillingRequestMessage', 10152: 'TencentBillingRequestMessage', 10153: 'CafeBazaarBillingRequestMessage', 10159: 'KunlunBillingRequestMessage', 10160: 'BillingCancelledByClientMessage', 10177: 'ClientInfoMessage', 10212: ChangeAvatarNameMessage, 10309: 'GetAllianceInviteTokenMessage', 10321: 'AttributionEventMessage', 10401: 'CreateGameMessage', 10501: 'AcceptFriendMessage', 10502: 'AddFriendMessage', 10503: 'AskForAddableFriendsMessage', 10504: 'AskForFriendListMessage', 10506: 'RemoveFriendMessage', 10507: 'AddFriendByEmailMessage', 10509: 'AddFriendByAvatarNameAndCodeMessage', 10512: 'AskForPlayingGamecenterFriendsMessage', 10513: 'AskForPlayingFacebookFriendsMessage', 10514: 'AskForPlayingKakaoFriendsMessage', 10515: 'AskForPlayingTencentFriendsMessage', 10516: 'AskForPlayingLineFriendsMessage', 10517: 'AskForPlayingSupercellFriendsMessage', 10523: 'YoozooBillingRequestMessage', 10555: 'ClientInputMessage', 10576: 'SetBlockFriendRequestsMessage', 10599: 'AskForFriendSuggestionsMessage', 10636: 'SCIDBindAccountMessage', 11736: 'SCIDLogoutAllDevicesMessage', 12100: 'CreatePlayerMapMessage', 12101: 'DeletePlayerMapMessage', 12102: 'GetPlayerMapsMessage', 12103: 'UpdatePlayerMapMessage', 12104: 'SubmitPlayerMapMessage', 12105: 'PublishPlayerMapMessage', 12106: 'ChangePlayerMapNameMessage', 12107: 'EnterMapEditorMessage', 12108: 'GoHomeFromMapEditorMessage', 12110: 'TeamSetPlayerMapMessage', 12111: 'SignoffPlayerMapMessage', 12125: 'ReportPlayerMapMessage', 12152: 'RankedMatchBanHeroMessage', 12155: 'RankedMatchPickHeroMessage', 12157: 'RankedMatchUpdateHeroDataMessage', 12905: 'GetCurrentBattleReplayDataMessage', 12998: 'SetCountryMessage', 13922: 'AcceptTokenFriendMessage', 14101: GoHomeMessage, 14102: EndClientTurnMessage, 14103: 'StartGameMessage', 14104: 'StartSpectateMessage', 14105: 'HomeLogicStoppedMessage', 14106: 'CancelMatchmakingMessage', 14107: 'StopSpectateMessage', 14108: 'GoHomeFromSpectateMessage', #14109: GoHomeFromOfflinePractiseMessage, //before v50 14110: AskForBattleEndMessage, #14113: GetPlayerProfileMessage, //before v50 14114: 'GetBattleLogMessage', 14115: 'BattleLogViewReplayMessage', 14116: 'ViewReplayByStringMessage', 14117: 'RequestMatchCancelMessage', 14118: 'SinglePlayerMatchRequestMessage', 14166: 'ChronosEventSeenMessage', 14167: 'ChronosEventSeenMessage', 14177: 'PlayAgainMessage', 14178: 'DebugCommandMessage', 14199: 'LookForGameRoomRequestMessage', 14211: 'UnbindFacebookAccountMessage', 14201: 'BindFacebookAccountMessage', 14202: 'BindKakaoAccountMessage', 14203: 'BingLineAccountMessage', 14212: 'BindGamecenterAccountMessage', 14213: 'UnbindKakaoAccountMessage', 14214: 'UnbindLineAccountMessage', 14262: 'BindGoogleServiceAccountMessage', 14266: 'BindTencentAccountMessage', 14268: 'TencentCheckCanPayMessage', 14276: 'TencentAntiAddictionInstructionExecutedMessage', 14277: 
'GetSeasonRewardsMessage', 14299: 'SetAllianceCountryMessage', 14301: 'CreateAllianceMessage', 14302: AskForAllianceDataMessage, 14303: 'AskForJoinableAlliancesListMessage', 14304: 'AskForAllianceStreamMessage', 14305: 'JoinAllianceMessage', 14306: 'ChangeAllianceMemberRoleMessage', 14307: 'KickAllianceMemberMessage', 14308: 'LeaveAllianceMessage', 14315: 'ChatToAllianceStreamMessage', 14316: 'ChangeAllianceSettingsMessage', 14317: 'RequestJoinAllianceMessage', 14321: 'RespondToAllianceJoinRequestMessage', 14322: 'SendAllianceInvitationMessage', 14323: 'JoinAllianceUsingInvitationMessage', 14324: 'SearchAlliancesMessage', 14326: 'SendAllianceInvitationToFriendMessage', 14330: 'SendAllianceMailMessage', 14350: 'TeamCreateMessage', 14351: 'TeamJoinMessage', 14352: 'TeamKickMessage', 14353: 'TeamLeaveMessage', 14354: 'TeamChangeMemberSettingsMessage', 14355: 'TeamSetMemberReadyMessage', 14356: 'TeamTogglePractiseMessage', 14357: 'TeamToggleMemberSideMessage', 14358: 'TeamSpectateMessage', 14359: 'TeamChatMessage', 14360: 'TeamPostAdMessage', 14361: 'TeamMemberStatusMessage', 14362: 'TeamSetEventMessage', 14363: 'TeamSetLocationMessage', 14364: 'TeamReportChatMessage', 14365: 'TeamInviteMessage', 14366: 'PlayerStatusMessage', 14367: 'TeamClearInviteMessage', 14368: 'TeamInviteResponseMessage', 14369: 'TeamPremadeChatMessage', 14370: 'TeamAllianceMemberInviteMessage', 14371: 'TeamJoinOrCreateGameRoomMessage', 14372: 'TeamToggleSettingsMessage', 14373: 'TeamBotSlotDisableMessage', 14403: 'GetLeaderboardMessage', 14405: 'AskForAvatarStreamMessage', 14406: 'AskForBattleReplayStreamMessage', 14418: 'RemoveAvatarStreamEntryMessage', 14469: 'AlliancePremadeChatMessage', 14479: 'TeamInvitationResponseMessage', 14600: 'AvatarNameCheckRequestMessage', 14700: 'ListBrawlTvChannelsMessage', 14701: 'TuneBrawlTvChannelMessage', 14715: 'SendGlobalChatLineMessage', 14777: 'SetInvitesBlockedMessage', 14778: 'SetTeamChatMutedMessage', 14867: 'SetRegionMessage', 14880: 'TeamRequestJoinCancelMessage', 14881: 'TeamRequestJoinMessage', 14882: 'TeamRequestJoinApproveMessage', 15081: GetPlayerProfileMessage, #v50 15793: 'GetTokenFriendMessage', 16000: 'LogicDeviceLinkCodeRequestMessage', 16001: 'LogicDeviceLinkMenuClosedMessage', 16002: 'LogicDeviceLinkEnterCodeMessage', 16003: 'LogicDeviceLinkConfirmYesMessage', 16939: 'AskApiTokenMessage', 17000: 'LogicAccountTransferCodeRequestMessage', 17190: 'JoinAllianceUsingTokenMessage', 17337: 'UnbotifyReportMessage', 17338: 'AdjustPackageMessage', 17750: GoHomeFromOfflinePractiseMessage, #v50 18686: 'SetSupportedCreatorMessage', 19001: 'LatencyTestResultMessage', 19002: 'UdpLatencyTestRequestMessage', 19003: 'TriggerStartLatencyTestMessage', 19004: 'RequestLatencyTestStatusMessage', 20000: 'SetEncryptionMessage', 20100: ServerHelloMessage, 20101: 'CreateAccountOkMessage', 20103: LoginFailedMessage, 20104: LoginOkMessage, 20105: 'FriendListMessage', 20106: 'FriendListUpdateMessage', 20107: 'AddableFriendsMessage', 20108: KeepAliveServerMessage, 20109: 'FriendOnlineStatusMessage', 20110: 'FriendLoggedInMessage', 20111: 'FriendLoggedOutMessage', 20112: 'AddFriendFailedMessage', 20117: 'ReportUserStatusMessage', 20118: 'ChatAccountBanStatusMessage', 20121: 'BillingRequestFailedMessage', 20132: 'UnlockAccountOkMessage', 20133: 'UnlockAccountFailedMessage', 20151: 'AppleBillingProcessedByServerMessage', 20152: 'GoogleBillingProcessedByServerMessage', 20153: 'TencentBillingProcessedByServerMessage', 20154: 'CafeBazaarBillingProcessedByServerMessage', 20156: 
'KunlunBillingProcessedByServerMessage', 20161: 'ShutdownStartedMessage', 20171: 'PersonalBreakStartedMessage', 20173: 'YoozooBillingProcessedByServerMessage', 20199: 'FriendSuggestionsMessage', 20205: 'AvatarNameChangeFailedMessage', 20206: 'AvatarOnlineStatusUpdated', 20207: 'AllianceOnlineStatusUpdatedMessage', 20300: 'AvatarNameCheckResponseMessage', 20402: 'CreateGameFailedMessage', 20405: 'MatchMakingStatusMessage', 20406: 'MatchMakingCancelledMessage', 20501: 'AcceptFriendFailedMessage', 20523: 'YoozooOrderAvailableMessage', 20545: 'YoozooOrderDeliveryFailedMessage', 20559: 'StartLoadingMessage', 20801: 'NotificationMessage', 20802: 'OpponentRejoinsMatchNotificationMessage', 20931: 'AntiAddictionDataUpdatedMessage', 22089: 'GetTokenFriendResultMessage', 22100: 'CreatePlayerMapResponseMessage', 22101: 'DeletePlayerMapResponseMessage', 22102: 'PlayerMapsMessage', 22103: 'UpdatePlayerMapResponseMessage', 22104: 'SubmitPlayerMapResponseMessage', 22105: 'PublishPlayerMapResponseMessage', 22106: 'ChangePlayerMapNameMResponseMessage', 22107: 'PlayerMapInfoUpdatedMessage', 22109: 'DebugPlayerMapReviewResultOverrideSetMessage', 22111: 'PlayerMapGreenlightedMessage', 22125: 'ReportPlayerMapResponseMessage', 22150: 'RankedMatchStartMessage', 22151: 'RankedMatchBanStartedMessage', 22152: 'RankedMatchBanHeroResponseMessage', 22153: 'RankedMatchBanEndedMessage', 22154: 'RankedMatchPickStartedMessage', 22155: 'RankedMatchPickHeroFailedMessage', 22156: 'RankedMatchHeroPickedMessage', 22157: 'RankedMatchHeroDataUpdatedMessage', 22158: 'RankedMatchFinalPreparationStartedMessage', 22159: 'RankedMatchTerminatedMessage', 22202: 'MapPreviewMessage', 22377: 'GoogleServiceAccountBoundMessage', 22687: 'GamecenterAccountAlreadyBoundMessage', 22957: 'PvpMatchmakeNotificationMessage', 23067: 'SCIDLogoutAllDevicesResultMessage', 23302: 'GetAllianceInviteTokenResultMessage', 23456: BattleEndMessage, 23457: LobbyInfoMessage, 23458: 'BattleLogMessage', 23459: 'BattleLogReplayAvailableMessage', 23494: 'GoogleServiceAccountAlreadyBoundMessage', 23774: 'PlayerJWTokenMessage',
24101: OwnHomeDataMessage,
17
2023-12-14 18:57:56+00:00
24k
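One thing to notice in the BSL-V53 row: LogicLaserMessageFactory.messagesList mixes actual message classes with plain strings that merely name unimplemented packets, so any resolver has to treat string entries as unavailable. The helper below is a hypothetical sketch of that dispatch, not code from the repo; it only assumes that the mapped classes accept the raw message data in their constructor, as ClientHelloMessage does.

def create_message(message_id: int, message_data):
    entry = LogicLaserMessageFactory.messagesList.get(message_id)
    if entry is None or isinstance(entry, str):
        # Unknown id, or a string placeholder for a message type that is
        # named but not implemented: nothing can be instantiated.
        return None
    return entry(message_data)  # entry is a PiranhaMessage subclass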
GXNU-ZhongLab/ODTrack
lib/train/base_functions.py
[ { "identifier": "Lasot", "path": "lib/train/dataset/lasot.py", "snippet": "class Lasot(BaseVideoDataset):\n \"\"\" LaSOT dataset.\n\n Publication:\n LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking\n Heng Fan, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, H...
import os import torch import lib.train.data.transforms as tfm from torch.utils.data.distributed import DistributedSampler from lib.train.dataset import Lasot, Got10k, MSCOCOSeq, ImagenetVID, TrackingNet from lib.train.dataset import Lasot_lmdb, Got10k_lmdb, MSCOCOSeq_lmdb, ImagenetVID_lmdb, TrackingNet_lmdb from lib.train.data import sampler, opencv_loader, processing, LTRLoader from lib.utils.misc import is_main_process
18,653
# datasets related def update_settings(settings, cfg): settings.print_interval = cfg.TRAIN.PRINT_INTERVAL settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR, 'search': cfg.DATA.SEARCH.FACTOR} settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE, 'search': cfg.DATA.SEARCH.SIZE} settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER, 'search': cfg.DATA.SEARCH.CENTER_JITTER} settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER, 'search': cfg.DATA.SEARCH.SCALE_JITTER} settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM settings.print_stats = None settings.batchsize = cfg.TRAIN.BATCH_SIZE settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE def names2datasets(name_list: list, settings, image_loader): assert isinstance(name_list, list) datasets = [] for name in name_list: assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full", "GOT10K_official_val", "COCO17", "VID", "TRACKINGNET", ] # Tracking Task if name == "LASOT": if settings.use_lmdb: print("Building lasot dataset from lmdb") datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader)) else: datasets.append(Lasot(settings.env.lasot_dir, split='train', image_loader=image_loader)) if name == "GOT10K_vottrain": if settings.use_lmdb: print("Building got10k from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='vottrain', image_loader=image_loader)) else: datasets.append(Got10k(settings.env.got10k_dir, split='vottrain', image_loader=image_loader)) if name == "GOT10K_train_full": if settings.use_lmdb: print("Building got10k_train_full from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='train_full', image_loader=image_loader)) else: datasets.append(Got10k(settings.env.got10k_dir, split='train_full', image_loader=image_loader)) if name == "GOT10K_votval": if settings.use_lmdb: print("Building got10k from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='votval', image_loader=image_loader)) else: datasets.append(Got10k(settings.env.got10k_dir, split='votval', image_loader=image_loader)) if name == "GOT10K_official_val": if settings.use_lmdb: raise ValueError("Not implement") else: datasets.append(Got10k(settings.env.got10k_val_dir, split=None, image_loader=image_loader)) if name == "COCO17": if settings.use_lmdb: print("Building COCO2017 from lmdb") datasets.append(MSCOCOSeq_lmdb(settings.env.coco_lmdb_dir, version="2017", image_loader=image_loader)) else: datasets.append(MSCOCOSeq(settings.env.coco_dir, version="2017", image_loader=image_loader)) if name == "VID": if settings.use_lmdb: print("Building VID from lmdb") datasets.append(ImagenetVID_lmdb(settings.env.imagenet_lmdb_dir, image_loader=image_loader)) else: datasets.append(ImagenetVID(settings.env.imagenet_dir, image_loader=image_loader)) if name == "TRACKINGNET": if settings.use_lmdb: print("Building TrackingNet from lmdb") datasets.append(TrackingNet_lmdb(settings.env.trackingnet_lmdb_dir, image_loader=image_loader)) else: # raise ValueError("NOW WE CAN ONLY USE TRACKINGNET FROM LMDB") datasets.append(TrackingNet(settings.env.trackingnet_dir, image_loader=image_loader)) return datasets def build_dataloaders(cfg, settings): # Data transform transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05), tfm.RandomHorizontalFlip(probability=0.5)) transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2), tfm.RandomHorizontalFlip_Norm(probability=0.5), # tfm.RandomHorizontalFlip(probability=0.5), 
tfm.Normalize(mean=cfg.DATA.MEAN, std=cfg.DATA.STD)) transform_val = tfm.Transform(tfm.ToTensor(), tfm.Normalize(mean=cfg.DATA.MEAN, std=cfg.DATA.STD)) # The tracking pairs processing module output_sz = settings.output_sz search_area_factor = settings.search_area_factor data_processing_train = processing.STARKProcessing(search_area_factor=search_area_factor, output_sz=output_sz, center_jitter_factor=settings.center_jitter_factor, scale_jitter_factor=settings.scale_jitter_factor, mode='sequence', transform=transform_train, joint_transform=transform_joint, settings=settings) data_processing_val = processing.STARKProcessing(search_area_factor=search_area_factor, output_sz=output_sz, center_jitter_factor=settings.center_jitter_factor, scale_jitter_factor=settings.scale_jitter_factor, mode='sequence', transform=transform_val, joint_transform=transform_joint, settings=settings) # Train sampler and loader settings.num_template = getattr(cfg.DATA.TEMPLATE, "NUMBER", 1) settings.num_search = getattr(cfg.DATA.SEARCH, "NUMBER", 1) sampler_mode = getattr(cfg.DATA, "SAMPLER_MODE", "causal") train_cls = getattr(cfg.TRAIN, "TRAIN_CLS", False) print("sampler_mode: ", sampler_mode)
# datasets related def update_settings(settings, cfg): settings.print_interval = cfg.TRAIN.PRINT_INTERVAL settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR, 'search': cfg.DATA.SEARCH.FACTOR} settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE, 'search': cfg.DATA.SEARCH.SIZE} settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER, 'search': cfg.DATA.SEARCH.CENTER_JITTER} settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER, 'search': cfg.DATA.SEARCH.SCALE_JITTER} settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM settings.print_stats = None settings.batchsize = cfg.TRAIN.BATCH_SIZE settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE def names2datasets(name_list: list, settings, image_loader): assert isinstance(name_list, list) datasets = [] for name in name_list: assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full", "GOT10K_official_val", "COCO17", "VID", "TRACKINGNET", ] # Tracking Task if name == "LASOT": if settings.use_lmdb: print("Building lasot dataset from lmdb") datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader)) else: datasets.append(Lasot(settings.env.lasot_dir, split='train', image_loader=image_loader)) if name == "GOT10K_vottrain": if settings.use_lmdb: print("Building got10k from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='vottrain', image_loader=image_loader)) else: datasets.append(Got10k(settings.env.got10k_dir, split='vottrain', image_loader=image_loader)) if name == "GOT10K_train_full": if settings.use_lmdb: print("Building got10k_train_full from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='train_full', image_loader=image_loader)) else: datasets.append(Got10k(settings.env.got10k_dir, split='train_full', image_loader=image_loader)) if name == "GOT10K_votval": if settings.use_lmdb: print("Building got10k from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='votval', image_loader=image_loader)) else: datasets.append(Got10k(settings.env.got10k_dir, split='votval', image_loader=image_loader)) if name == "GOT10K_official_val": if settings.use_lmdb: raise ValueError("Not implement") else: datasets.append(Got10k(settings.env.got10k_val_dir, split=None, image_loader=image_loader)) if name == "COCO17": if settings.use_lmdb: print("Building COCO2017 from lmdb") datasets.append(MSCOCOSeq_lmdb(settings.env.coco_lmdb_dir, version="2017", image_loader=image_loader)) else: datasets.append(MSCOCOSeq(settings.env.coco_dir, version="2017", image_loader=image_loader)) if name == "VID": if settings.use_lmdb: print("Building VID from lmdb") datasets.append(ImagenetVID_lmdb(settings.env.imagenet_lmdb_dir, image_loader=image_loader)) else: datasets.append(ImagenetVID(settings.env.imagenet_dir, image_loader=image_loader)) if name == "TRACKINGNET": if settings.use_lmdb: print("Building TrackingNet from lmdb") datasets.append(TrackingNet_lmdb(settings.env.trackingnet_lmdb_dir, image_loader=image_loader)) else: # raise ValueError("NOW WE CAN ONLY USE TRACKINGNET FROM LMDB") datasets.append(TrackingNet(settings.env.trackingnet_dir, image_loader=image_loader)) return datasets def build_dataloaders(cfg, settings): # Data transform transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05), tfm.RandomHorizontalFlip(probability=0.5)) transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2), tfm.RandomHorizontalFlip_Norm(probability=0.5), # tfm.RandomHorizontalFlip(probability=0.5), 
tfm.Normalize(mean=cfg.DATA.MEAN, std=cfg.DATA.STD)) transform_val = tfm.Transform(tfm.ToTensor(), tfm.Normalize(mean=cfg.DATA.MEAN, std=cfg.DATA.STD)) # The tracking pairs processing module output_sz = settings.output_sz search_area_factor = settings.search_area_factor data_processing_train = processing.STARKProcessing(search_area_factor=search_area_factor, output_sz=output_sz, center_jitter_factor=settings.center_jitter_factor, scale_jitter_factor=settings.scale_jitter_factor, mode='sequence', transform=transform_train, joint_transform=transform_joint, settings=settings) data_processing_val = processing.STARKProcessing(search_area_factor=search_area_factor, output_sz=output_sz, center_jitter_factor=settings.center_jitter_factor, scale_jitter_factor=settings.scale_jitter_factor, mode='sequence', transform=transform_val, joint_transform=transform_joint, settings=settings) # Train sampler and loader settings.num_template = getattr(cfg.DATA.TEMPLATE, "NUMBER", 1) settings.num_search = getattr(cfg.DATA.SEARCH, "NUMBER", 1) sampler_mode = getattr(cfg.DATA, "SAMPLER_MODE", "causal") train_cls = getattr(cfg.TRAIN, "TRAIN_CLS", False) print("sampler_mode: ", sampler_mode)
dataset_train = sampler.TrackingSampler(datasets=names2datasets(cfg.DATA.TRAIN.DATASETS_NAME, settings, opencv_loader),
13
2023-12-10 03:57:19+00:00
24k
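Finally, the ODTrack row's next_line feeds the names2datasets output into sampler.TrackingSampler, which the repo then wraps with its own LTRLoader. As a rough, generic equivalent, the sketch below shows the usual PyTorch pattern of sharding a training dataset with DistributedSampler; the function name and any settings fields other than batchsize are assumptions for illustration, not the repo's API.

from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler

def make_train_loader(dataset_train, settings, local_rank=-1):
    # Shard samples across processes when launched with torch.distributed
    # (requires an initialized process group); otherwise shuffle normally.
    train_sampler = DistributedSampler(dataset_train) if local_rank != -1 else None
    return DataLoader(
        dataset_train,
        batch_size=settings.batchsize,  # set in update_settings above
        shuffle=(train_sampler is None),
        sampler=train_sampler,
        num_workers=getattr(settings, "num_workers", 4),  # assumed field
        drop_last=True,
    )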