Schema (one row per record; string fields report min–max value lengths):

    field                type      range
    repo_name            string    length 7 – 71
    file_path            string    length 5 – 118
    context              list      –
    import_statement     string    length 45 – 12.5k
    token_num            int64     641 – 99.4k
    cropped_code         string    length 44 – 17k
    all_code             string    length 43 – 754k
    next_line            string    length 2 – 330
    gold_snippet_index   int64     0 – 68
    created_at           string    length 25 – 25
    level                string    9 classes
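Each record pairs a file under completion (cropped_code, with the ground-truth next_line held out) with cross-file context: a list of candidate snippets from the same repository, of which gold_snippet_index appears to select the one relevant to the held-out line. A minimal sketch of iterating such records, assuming the split is available as a local JSONL export (the file name and layout below are assumptions, not part of any published loader):

    import json

    # Hypothetical local export of the records shown below.
    with open("completion_records.jsonl") as f:
        for line in f:
            rec = json.loads(line)
            # `context` is a list of cross-file snippets; `gold_snippet_index`
            # appears to point at the snippet needed to predict `next_line`.
            gold = rec["context"][rec["gold_snippet_index"]]
            # In-file prompt: the import block plus the cropped code window.
            prompt = rec["import_statement"] + "\n" + rec["cropped_code"]
            print(rec["repo_name"], rec["file_path"], rec["level"],
                  gold["identifier"], repr(rec["next_line"]))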

repo_name: jiangjiechen/auction-arena
file_path: auction_workflow.py
context: [ { "identifier": "Auctioneer", "path": "src/auctioneer_base.py", "snippet": "class Auctioneer(BaseModel):\n enable_discount: bool = False\n items: List[Item] = []\n cur_item: Item = None\n highest_bidder: Bidder = None\n highest_bid: int = -1\n bidding_history = defaultdict(list) # hist...
import_statement:
import os
import time
import gradio as gr
import ujson as json
import traceback
import argparse
from typing import List
from tqdm import tqdm
from src.auctioneer_base import Auctioneer
from src.bidder_base import Bidder, bidders_to_chatbots, bidding_multithread
from utils import trace_back
from src.item_base import create_items
from src.bidder_base import create_bidders
from transformers import GPT2TokenizerFast
token_num: 12,711
cropped_code:
LOG_DIR = 'logs'
enable_gr = gr.update(interactive=True)
disable_gr = gr.update(interactive=False)
all_code:
LOG_DIR = 'logs'
enable_gr = gr.update(interactive=True)
disable_gr = gr.update(interactive=False)
next_line: def monitor_all(bidder_list: List[Bidder]):
gold_snippet_index: 1
created_at: 2023-10-08 09:30:57+00:00
level: 16k
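Because next_line holds out exactly one line ("def monitor_all(bidder_list: List[Bidder]):" in the record above), a completion can be scored by whitespace-normalized exact match. A minimal sketch; `generate` stands in for whatever model call produces the candidate line and is an assumption, not a function defined anywhere in this data:

    def exact_match(pred: str, target: str) -> bool:
        # Score one predicted line against the held-out next_line,
        # ignoring surrounding whitespace.
        return pred.strip() == target.strip()

    # Usage sketch with a hypothetical model wrapper:
    # hits = sum(exact_match(generate(rec), rec["next_line"]) for rec in records)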

repo_name: SH1ROd/Bert-VITS2-Integration-train-txt-infer
file_path: train_ms.py
context: [ { "identifier": "TextAudioSpeakerLoader", "path": "data_utils.py", "snippet": "class TextAudioSpeakerLoader(torch.utils.data.Dataset):\n \"\"\"\n 1) loads audio, speaker_id, text pairs\n 2) normalizes text and converts them to sequences of integers\n 3) computes spectrograms from...
import_statement:
import os
import json
import argparse
import itertools
import math
import torch
import shutil
import torch.multiprocessing as mp
import torch.distributed as dist
import logging
import commons
import utils
from torch import nn, optim
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.cuda.amp import autocast, GradScaler
from tqdm import tqdm
from data_utils import (
    TextAudioSpeakerLoader,
    TextAudioSpeakerCollate,
    DistributedBucketSampler
)
from models import (
    SynthesizerTrn,
    MultiPeriodDiscriminator,
    DurationDiscriminator,
)
from losses import (
    generator_loss,
    discriminator_loss,
    feature_loss,
    kl_loss
)
from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
from text.symbols import symbols
token_num: 10,914
cropped_code:
optim_d, skip_optimizer=not hps.cont) epoch_str = max(epoch_str, 1) global_step = (epoch_str - 1) * len(train_loader) except Exception as e: print(e) epoch_str = 1 global_step = 0 else: _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "G_*.pth"), net_g, optim_g, True) _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "D_*.pth"), net_d, optim_d, True) scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) if net_dur_disc is not None: scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR(optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str-2) else: scheduler_dur_disc = None scaler = GradScaler(enabled=hps.train.fp16_run) for epoch in range(epoch_str, hps.train.epochs + 1): if rank == 0: train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval],role=role) else: train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None, role=role) scheduler_g.step() scheduler_d.step() if net_dur_disc is not None: scheduler_dur_disc.step() def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers, role): net_g, net_d, net_dur_disc = nets optim_g, optim_d, optim_dur_disc = optims scheduler_g, scheduler_d, scheduler_dur_disc = schedulers train_loader, eval_loader = loaders if writers is not None: writer, writer_eval = writers train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() net_d.train() if net_dur_disc is not None: net_dur_disc.train() for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert) in tqdm(enumerate(train_loader)): if net_g.module.use_noise_scaled_mas: current_mas_noise_scale = net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True) spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True) y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True) speakers = speakers.cuda(rank, non_blocking=True) tone = tone.cuda(rank, non_blocking=True) language = language.cuda(rank, non_blocking=True) bert = bert.cuda(rank, non_blocking=True) with autocast(enabled=hps.train.fp16_run): y_hat, l_length, attn, ids_slice, x_mask, z_mask, \ (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_) = net_g(x, x_lengths, spec, spec_lengths, speakers, tone, language, bert) mel = spec_to_mel_torch( spec, hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.mel_fmin, hps.data.mel_fmax) y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length) y_hat_mel = mel_spectrogram_torch( y_hat.squeeze(1), hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length, hps.data.mel_fmin, hps.data.mel_fmax ) y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice # Discriminator 
y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) with autocast(enabled=False): loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g) loss_disc_all = loss_disc if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x.detach(), x_mask.detach(), logw.detach(), logw_.detach()) with autocast(enabled=False): # TODO: I think need to mean using the mask, but for now, just mean all loss_dur_disc, losses_dur_disc_r, losses_dur_disc_g = discriminator_loss(y_dur_hat_r, y_dur_hat_g) loss_dur_disc_all = loss_dur_disc optim_dur_disc.zero_grad() scaler.scale(loss_dur_disc_all).backward() scaler.unscale_(optim_dur_disc) grad_norm_dur_disc = commons.clip_grad_value_(net_dur_disc.parameters(), None) scaler.step(optim_dur_disc) optim_d.zero_grad() scaler.scale(loss_disc_all).backward() scaler.unscale_(optim_d) grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) scaler.step(optim_d) with autocast(enabled=hps.train.fp16_run): # Generator y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x, x_mask, logw, logw_) with autocast(enabled=False): loss_dur = torch.sum(l_length.float()) loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl loss_fm = feature_loss(fmap_r, fmap_g)
all_code:
logging.getLogger('numba').setLevel(logging.WARNING) torch.backends.cudnn.benchmark = True torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = True torch.set_float32_matmul_precision('medium') global_step = 0 def main(): """Assume Single Node Multi GPUs Training Only""" assert torch.cuda.is_available(), "CPU training is not allowed." n_gpus = torch.cuda.device_count() os.environ['MASTER_ADDR'] = 'localhost' os.environ['MASTER_PORT'] = '65280' hps = utils.get_hparams() role='' for t in hps.data.spk2id.items(): role=t[0] if not hps.cont: folder_path = f"./logs/{role}" if not os.path.exists(folder_path): os.makedirs(folder_path) print(f"文件夹 '{role}' 已创建在 './logs/' 目录下。") else: print(f"文件夹 '{role}' 已经存在于 './logs/' 目录下。") shutil.copy('./pretrained_models/D_0.pth',f'./logs/{role}/D_0.pth') shutil.copy('./pretrained_models/G_0.pth',f'./logs/{role}/G_0.pth') shutil.copy('./pretrained_models/DUR_0.pth',f'./logs/{role}/DUR_0.pth') mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps, role)) def run(rank, n_gpus, hps, role): global global_step if rank == 0: logger = utils.get_logger(hps.model_dir) logger.info(hps) utils.check_git_hash(hps.model_dir) writer = SummaryWriter(log_dir=hps.model_dir) writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) dist.init_process_group(backend= 'gloo' if os.name == 'nt' else 'nccl', init_method='env://', world_size=n_gpus, rank=rank) torch.manual_seed(hps.train.seed) torch.cuda.set_device(rank) train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) train_sampler = DistributedBucketSampler( train_dataset, hps.train.batch_size, [32, 300, 400, 500, 600, 700, 800, 900, 1000], num_replicas=n_gpus, rank=rank, shuffle=True) collate_fn = TextAudioSpeakerCollate() train_loader = DataLoader(train_dataset, num_workers=2, shuffle=False, pin_memory=True, collate_fn=collate_fn, batch_sampler=train_sampler) if rank == 0: eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False, batch_size=1, pin_memory=True, drop_last=False, collate_fn=collate_fn) if "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas == True: print("Using noise scaled MAS for VITS2") use_noise_scaled_mas = True mas_noise_scale_initial = 0.01 noise_scale_delta = 2e-6 else: print("Using normal MAS for VITS1") use_noise_scaled_mas = False mas_noise_scale_initial = 0.0 noise_scale_delta = 0.0 if "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator == True: print("Using duration discriminator for VITS2") use_duration_discriminator = True net_dur_disc = DurationDiscriminator( hps.model.hidden_channels, hps.model.hidden_channels, 3, 0.1, gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, ).cuda(rank) if "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder == True: if hps.data.n_speakers == 0: raise ValueError("n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model") use_spk_conditioned_encoder = True else: print("Using normal encoder for VITS1") use_spk_conditioned_encoder = False net_g = SynthesizerTrn( len(symbols), hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, mas_noise_scale_initial = mas_noise_scale_initial, noise_scale_delta = noise_scale_delta, **hps.model).cuda(rank) freeze_enc = getattr(hps.model, "freeze_enc", False) if freeze_enc: print("freeze 
encoder !!!") for param in net_g.enc_p.parameters(): param.requires_grad = False net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) optim_g = torch.optim.AdamW( filter(lambda p: p.requires_grad, net_g.parameters()), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps) optim_d = torch.optim.AdamW( net_d.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps) if net_dur_disc is not None: optim_dur_disc = torch.optim.AdamW( net_dur_disc.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps) else: optim_dur_disc = None net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True) net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True) if net_dur_disc is not None: net_dur_disc = DDP(net_dur_disc, device_ids=[rank], find_unused_parameters=True) pretrain_dir = None if pretrain_dir is None: try: if net_dur_disc is not None: _, optim_dur_disc, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "DUR_*.pth"), net_dur_disc, optim_dur_disc, skip_optimizer=not hps.cont) _, optim_g, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g, skip_optimizer=not hps.cont) _, optim_d, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d, skip_optimizer=not hps.cont) epoch_str = max(epoch_str, 1) global_step = (epoch_str - 1) * len(train_loader) except Exception as e: print(e) epoch_str = 1 global_step = 0 else: _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "G_*.pth"), net_g, optim_g, True) _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "D_*.pth"), net_d, optim_d, True) scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) if net_dur_disc is not None: scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR(optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str-2) else: scheduler_dur_disc = None scaler = GradScaler(enabled=hps.train.fp16_run) for epoch in range(epoch_str, hps.train.epochs + 1): if rank == 0: train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval],role=role) else: train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None, role=role) scheduler_g.step() scheduler_d.step() if net_dur_disc is not None: scheduler_dur_disc.step() def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers, role): net_g, net_d, net_dur_disc = nets optim_g, optim_d, optim_dur_disc = optims scheduler_g, scheduler_d, scheduler_dur_disc = schedulers train_loader, eval_loader = loaders if writers is not None: writer, writer_eval = writers train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() net_d.train() if net_dur_disc is not None: net_dur_disc.train() for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert) in tqdm(enumerate(train_loader)): if net_g.module.use_noise_scaled_mas: current_mas_noise_scale = 
net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True) spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True) y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True) speakers = speakers.cuda(rank, non_blocking=True) tone = tone.cuda(rank, non_blocking=True) language = language.cuda(rank, non_blocking=True) bert = bert.cuda(rank, non_blocking=True) with autocast(enabled=hps.train.fp16_run): y_hat, l_length, attn, ids_slice, x_mask, z_mask, \ (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_) = net_g(x, x_lengths, spec, spec_lengths, speakers, tone, language, bert) mel = spec_to_mel_torch( spec, hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.mel_fmin, hps.data.mel_fmax) y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length) y_hat_mel = mel_spectrogram_torch( y_hat.squeeze(1), hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length, hps.data.mel_fmin, hps.data.mel_fmax ) y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice # Discriminator y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) with autocast(enabled=False): loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g) loss_disc_all = loss_disc if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x.detach(), x_mask.detach(), logw.detach(), logw_.detach()) with autocast(enabled=False): # TODO: I think need to mean using the mask, but for now, just mean all loss_dur_disc, losses_dur_disc_r, losses_dur_disc_g = discriminator_loss(y_dur_hat_r, y_dur_hat_g) loss_dur_disc_all = loss_dur_disc optim_dur_disc.zero_grad() scaler.scale(loss_dur_disc_all).backward() scaler.unscale_(optim_dur_disc) grad_norm_dur_disc = commons.clip_grad_value_(net_dur_disc.parameters(), None) scaler.step(optim_dur_disc) optim_d.zero_grad() scaler.scale(loss_disc_all).backward() scaler.unscale_(optim_d) grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) scaler.step(optim_d) with autocast(enabled=hps.train.fp16_run): # Generator y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x, x_mask, logw, logw_) with autocast(enabled=False): loss_dur = torch.sum(l_length.float()) loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl loss_fm = feature_loss(fmap_r, fmap_g)
next_line: loss_gen, losses_gen = generator_loss(y_d_hat_g)
gold_snippet_index: 6
created_at: 2023-10-10 02:23:23+00:00
level: 16k

repo_name: sakemin/cog-musicgen-chord
file_path: audiocraft/modules/conditioners.py
context: [ { "identifier": "ChromaExtractor", "path": "audiocraft/modules/chroma.py", "snippet": "class ChromaExtractor(nn.Module):\n \"\"\"Chroma extraction and quantization.\n\n Args:\n sample_rate (int): Sample rate for the chroma extraction.\n n_chroma (int): Number of chroma bins for the c...
import_statement:
from collections import defaultdict
from copy import deepcopy
from dataclasses import dataclass, field
from itertools import chain
from pathlib import Path
from num2words import num2words
from transformers import RobertaTokenizer, T5EncoderModel, T5Tokenizer  # type: ignore
from torch import nn
from torch.nn.utils.rnn import pad_sequence
from .chroma import ChromaExtractor
from .chord_chroma import ChordExtractor
from .streaming import StreamingModule
from .transformer import create_sin_embedding
from ..data.audio import audio_read
from ..data.audio_dataset import SegmentInfo
from ..data.audio_utils import convert_audio
from ..environment import AudioCraftEnvironment
from ..quantization import ResidualVectorQuantizer
from ..utils.autocast import TorchAutocast
from ..utils.cache import EmbeddingCache
from ..utils.utils import collate, hash_trick, length_to_mask, load_clap_state_dict, warn_once
from .btc.utils import chords
from demucs import pretrained
from audiocraft.data.audio_dataset import AudioDataset
from demucs.apply import apply_model
from demucs.audio import convert_audio
from demucs import pretrained
from audiocraft.data.audio_dataset import AudioDataset
from demucs.apply import apply_model
from demucs.audio import convert_audio
import logging
import math
import random
import re
import typing as tp
import warnings
import einops
import spacy
import torch
import torch.nn.functional as F
import numpy as np
import laion_clap  # type: ignore
token_num: 13,982
cropped_code:
return self.chroma(wav) @torch.no_grad() def _compute_wav_embedding(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor: """Compute wav embedding, applying stem and chroma extraction.""" # avoid 0-size tensors when we are working with null conds if wav.shape[-1] == 1: return self._extract_chroma(wav) stems = self._get_stemmed_wav(wav, sample_rate) chroma = self._extract_chroma(stems) return chroma @torch.no_grad() def _get_full_chroma_for_cache(self, path: tp.Union[str, Path], x: WavCondition, idx: int) -> torch.Tensor: """Extract chroma from the whole audio waveform at the given path.""" wav, sr = audio_read(path) wav = wav[None].to(self.device) wav = convert_audio(wav, sr, self.sample_rate, to_channels=1) chroma = self._compute_wav_embedding(wav, self.sample_rate)[0] return chroma def _extract_chroma_chunk(self, full_chroma: torch.Tensor, x: WavCondition, idx: int) -> torch.Tensor: """Extract a chunk of chroma from the full chroma derived from the full waveform.""" wav_length = x.wav.shape[-1] seek_time = x.seek_time[idx] assert seek_time is not None, ( "WavCondition seek_time is required " "when extracting chroma chunks from pre-computed chroma.") full_chroma = full_chroma.float() frame_rate = self.sample_rate / self._downsampling_factor() target_length = int(frame_rate * wav_length / self.sample_rate) index = int(frame_rate * seek_time) out = full_chroma[index: index + target_length] out = F.pad(out[None], (0, 0, 0, target_length - out.shape[0]))[0] return out.to(self.device) @torch.no_grad() def _get_wav_embedding(self, x: WavCondition) -> torch.Tensor: """Get the wav embedding from the WavCondition. The conditioner will either extract the embedding on-the-fly computing it from the condition wav directly or will rely on the embedding cache to load the pre-computed embedding if relevant. """ sampled_wav: tp.Optional[torch.Tensor] = None if not self.training and self.eval_wavs is not None: warn_once(logger, "Using precomputed evaluation wavs!") sampled_wav = self._sample_eval_wavs(len(x.wav)) no_undefined_paths = all(p is not None for p in x.path) no_nullified_cond = x.wav.shape[-1] > 1 if sampled_wav is not None: chroma = self._compute_wav_embedding(sampled_wav, self.sample_rate) elif self.cache is not None and no_undefined_paths and no_nullified_cond: paths = [Path(p) for p in x.path if p is not None] chroma = self.cache.get_embed_from_cache(paths, x) else: assert all(sr == x.sample_rate[0] for sr in x.sample_rate), "All sample rates in batch should be equal." chroma = self._compute_wav_embedding(x.wav, x.sample_rate[0]) if self.match_len_on_eval: B, T, C = chroma.shape if T > self.chroma_len: chroma = chroma[:, :self.chroma_len] logger.debug(f"Chroma was truncated to match length! ({T} -> {chroma.shape[1]})") elif T < self.chroma_len: n_repeat = int(math.ceil(self.chroma_len / T)) chroma = chroma.repeat(1, n_repeat, 1) chroma = chroma[:, :self.chroma_len] logger.debug(f"Chroma was repeated to match length! ({T} -> {chroma.shape[1]})") return chroma def tokenize(self, x: WavCondition) -> WavCondition: """Apply WavConditioner tokenization and populate cache if needed.""" x = super().tokenize(x) no_undefined_paths = all(p is not None for p in x.path) if self.cache is not None and no_undefined_paths: paths = [Path(p) for p in x.path if p is not None] self.cache.populate_embed_cache(paths, x) return x class ChromaChordConditioner(ChromaStemConditioner): """Chord Chroma conditioner based on stems. 
The ChromaChordConditioner uses DEMUCS to first filter out drums and bass, as the drums and bass often dominate the chroma leading to the chroma features not containing information about the melody. Args: output_dim (int): Output dimension for the conditioner. sample_rate (int): Sample rate for the chroma extractor. n_chroma (int): Number of chroma bins for the chroma extractor. radix2_exp (int): Size of stft window for the chroma extractor (power of 2, e.g. 12 -> 2^12). duration (int): duration used during training. This is later used for correct padding in case we are using chroma as prefix. match_len_on_eval (bool, optional): if True then all chromas are padded to the training duration. Defaults to False. eval_wavs (str, optional): path to a dataset manifest with waveform, this waveforms are used as conditions during eval (for cases where we don't want to leak test conditions like MusicCaps). Defaults to None. n_eval_wavs (int, optional): limits the number of waveforms used for conditioning. Defaults to 0. device (tp.Union[torch.device, str], optional): Device for the conditioner. **kwargs: Additional parameters for the chroma extractor. """ def __init__(self, output_dim: int, sample_rate: int, n_chroma: int, radix2_exp: int, duration: float, match_len_on_eval: bool = True, eval_wavs: tp.Optional[str] = None, n_eval_wavs: int = 0, cache_path: tp.Optional[tp.Union[str, Path]] = None, device: tp.Union[torch.device, str] = 'cpu', **kwargs): super().__init__(output_dim = output_dim, sample_rate = sample_rate, n_chroma = n_chroma, radix2_exp = radix2_exp, duration = duration, match_len_on_eval = match_len_on_eval, eval_wavs = eval_wavs, n_eval_wavs = n_eval_wavs, cache_path = cache_path, device = device) self.winhop = self.chroma.winhop self.__dict__['demucs'] = pretrained.get_model('htdemucs').to(device) stem_sources: list = self.demucs.sources self.stem_indices = torch.LongTensor([stem_sources.index('vocals'), stem_sources.index('bass'), stem_sources.index('other')]).to(device) self.chroma_len = self._get_chroma_len() self.bar2chromabin = self.sample_rate / self.winhop self.chroma = ChordExtractor(device = device, sample_rate=sample_rate, n_chroma=n_chroma, max_duration = duration, chroma_len = self.chroma_len, winhop = self.winhop).to(device)
all_code:
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. logger = logging.getLogger(__name__) TextCondition = tp.Optional[str] # a text condition can be a string or None (if doesn't exist) ConditionType = tp.Tuple[torch.Tensor, torch.Tensor] # condition, mask class WavCondition(tp.NamedTuple): wav: torch.Tensor length: torch.Tensor sample_rate: tp.List[int] path: tp.List[tp.Optional[str]] = [] seek_time: tp.List[tp.Optional[float]] = [] class WavChordTextCondition(tp.NamedTuple): wav: tp.Union[torch.Tensor,str,tp.List[str]] length: torch.Tensor sample_rate: tp.List[int] path: tp.List[tp.Optional[str]] = [] seek_time: tp.List[tp.Optional[float]] = [] bpm : tp.List[tp.Optional[tp.Union[int, float]]] = [] meter : tp.List[tp.Optional[int]] = [] class JointEmbedCondition(tp.NamedTuple): wav: torch.Tensor text: tp.List[tp.Optional[str]] length: torch.Tensor sample_rate: tp.List[int] path: tp.List[tp.Optional[str]] = [] seek_time: tp.List[tp.Optional[float]] = [] @dataclass class ConditioningAttributes: text: tp.Dict[str, tp.Optional[str]] = field(default_factory=dict) wav: tp.Dict[str, tp.Union[WavCondition,WavChordTextCondition]] = field(default_factory=dict) joint_embed: tp.Dict[str, JointEmbedCondition] = field(default_factory=dict) def __getitem__(self, item): return getattr(self, item) @property def text_attributes(self): return self.text.keys() @property def wav_attributes(self): return self.wav.keys() @property def joint_embed_attributes(self): return self.joint_embed.keys() @property def attributes(self): return { "text": self.text_attributes, "wav": self.wav_attributes, "joint_embed": self.joint_embed_attributes, } def to_flat_dict(self): return { **{f"text.{k}": v for k, v in self.text.items()}, **{f"wav.{k}": v for k, v in self.wav.items()}, **{f"joint_embed.{k}": v for k, v in self.joint_embed.items()} } @classmethod def from_flat_dict(cls, x): out = cls() for k, v in x.items(): kind, att = k.split(".") out[kind][att] = v return out class SegmentWithAttributes(SegmentInfo): """Base class for all dataclasses that are used for conditioning. All child classes should implement `to_condition_attributes` that converts the existing attributes to a dataclass of type ConditioningAttributes. """ def to_condition_attributes(self) -> ConditioningAttributes: raise NotImplementedError() def nullify_condition(condition: ConditionType, dim: int = 1): """Transform an input condition to a null condition. The way it is done by converting it to a single zero vector similarly to how it is done inside WhiteSpaceTokenizer and NoopTokenizer. Args: condition (ConditionType): A tuple of condition and mask (tuple[torch.Tensor, torch.Tensor]) dim (int): The dimension that will be truncated (should be the time dimension) WARNING!: dim should not be the batch dimension! Returns: ConditionType: A tuple of null condition and mask """ assert dim != 0, "dim cannot be the batch dimension!" assert isinstance(condition, tuple) and \ isinstance(condition[0], torch.Tensor) and \ isinstance(condition[1], torch.Tensor), "'nullify_condition' got an unexpected input type!" cond, mask = condition B = cond.shape[0] last_dim = cond.dim() - 1 out = cond.transpose(dim, last_dim) out = 0. 
* out[..., :1] out = out.transpose(dim, last_dim) mask = torch.zeros((B, 1), device=out.device).int() assert cond.dim() == out.dim() return out, mask def nullify_wav(cond: tp.Union[WavCondition,WavChordTextCondition]) -> tp.Union[WavCondition,WavChordTextCondition]: """Transform a WavCondition to a nullified WavCondition. It replaces the wav by a null tensor, forces its length to 0, and replaces metadata by dummy attributes. Args: cond (WavCondition): Wav condition with wav, tensor of shape [B, T]. Returns: WavCondition: Nullified wav condition. """ if not isinstance(cond, WavChordTextCondition): null_wav, _ = nullify_condition((cond.wav, torch.zeros_like(cond.wav)), dim=cond.wav.dim() - 1) return WavCondition( wav=null_wav, length=torch.tensor([0] * cond.wav.shape[0], device=cond.wav.device), sample_rate=cond.sample_rate, path=[None] * cond.wav.shape[0], seek_time=[None] * cond.wav.shape[0], ) else: return WavChordTextCondition( wav=['N']* len(cond.wav), length=torch.tensor([0] * len(cond.wav), device=cond.length.device), sample_rate=cond.sample_rate, path=[None], seek_time=[None], bpm = cond.bpm, meter = cond.meter ) def nullify_joint_embed(embed: JointEmbedCondition) -> JointEmbedCondition: """Nullify the joint embedding condition by replacing it by a null tensor, forcing its length to 0, and replacing metadata by dummy attributes. Args: cond (JointEmbedCondition): Joint embedding condition with wav and text, wav tensor of shape [B, C, T]. """ null_wav, _ = nullify_condition((embed.wav, torch.zeros_like(embed.wav)), dim=embed.wav.dim() - 1) return JointEmbedCondition( wav=null_wav, text=[None] * len(embed.text), length=torch.LongTensor([0]).to(embed.wav.device), sample_rate=embed.sample_rate, path=[None] * embed.wav.shape[0], seek_time=[0] * embed.wav.shape[0], ) class Tokenizer: """Base tokenizer implementation (in case we want to introduce more advances tokenizers in the future). """ def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: raise NotImplementedError() class WhiteSpaceTokenizer(Tokenizer): """This tokenizer should be used for natural language descriptions. For example: ["he didn't, know he's going home.", 'shorter sentence'] => [[78, 62, 31, 4, 78, 25, 19, 34], [59, 77, 0, 0, 0, 0, 0, 0]] """ PUNCTUATION = "?:!.,;" def __init__(self, n_bins: int, pad_idx: int = 0, language: str = "en_core_web_sm", lemma: bool = True, stopwords: bool = True) -> None: self.n_bins = n_bins self.pad_idx = pad_idx self.lemma = lemma self.stopwords = stopwords try: self.nlp = spacy.load(language) except IOError: spacy.cli.download(language) # type: ignore self.nlp = spacy.load(language) @tp.no_type_check def __call__(self, texts: tp.List[tp.Optional[str]], return_text: bool = False) -> tp.Tuple[torch.Tensor, torch.Tensor]: """Take a list of strings and convert them to a tensor of indices. Args: texts (list[str]): List of strings. return_text (bool, optional): Whether to return text as additional tuple item. Defaults to False. Returns: tuple[torch.Tensor, torch.Tensor]: - Indices of words in the LUT. 
- And a mask indicating where the padding tokens are """ output, lengths = [], [] texts = deepcopy(texts) for i, text in enumerate(texts): # if current sample doesn't have a certain attribute, replace with pad token if text is None: output.append(torch.Tensor([self.pad_idx])) lengths.append(0) continue # convert numbers to words text = re.sub(r"(\d+)", lambda x: num2words(int(x.group(0))), text) # type: ignore # normalize text text = self.nlp(text) # type: ignore # remove stopwords if self.stopwords: text = [w for w in text if not w.is_stop] # type: ignore # remove punctuation text = [w for w in text if w.text not in self.PUNCTUATION] # type: ignore # lemmatize if needed text = [getattr(t, "lemma_" if self.lemma else "text") for t in text] # type: ignore texts[i] = " ".join(text) lengths.append(len(text)) # convert to tensor tokens = torch.Tensor([hash_trick(w, self.n_bins) for w in text]) output.append(tokens) mask = length_to_mask(torch.IntTensor(lengths)).int() padded_output = pad_sequence(output, padding_value=self.pad_idx).int().t() if return_text: return padded_output, mask, texts # type: ignore return padded_output, mask class NoopTokenizer(Tokenizer): """This tokenizer should be used for global conditioners such as: artist, genre, key, etc. The difference between this and WhiteSpaceTokenizer is that NoopTokenizer does not split strings, so "Jeff Buckley" will get it's own index. Whereas WhiteSpaceTokenizer will split it to ["Jeff", "Buckley"] and return an index per word. For example: ["Queen", "ABBA", "Jeff Buckley"] => [43, 55, 101] ["Metal", "Rock", "Classical"] => [0, 223, 51] """ def __init__(self, n_bins: int, pad_idx: int = 0): self.n_bins = n_bins self.pad_idx = pad_idx def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: output, lengths = [], [] for text in texts: # if current sample doesn't have a certain attribute, replace with pad token if text is None: output.append(self.pad_idx) lengths.append(0) else: output.append(hash_trick(text, self.n_bins)) lengths.append(1) tokens = torch.LongTensor(output).unsqueeze(1) mask = length_to_mask(torch.IntTensor(lengths)).int() return tokens, mask class BaseConditioner(nn.Module): """Base model for all conditioner modules. We allow the output dim to be different than the hidden dim for two reasons: 1) keep our LUTs small when the vocab is large; 2) make all condition dims consistent. Args: dim (int): Hidden dim of the model. output_dim (int): Output dim of the conditioner. """ def __init__(self, dim: int, output_dim: int): super().__init__() self.dim = dim self.output_dim = output_dim self.output_proj = nn.Linear(dim, output_dim) def tokenize(self, *args, **kwargs) -> tp.Any: """Should be any part of the processing that will lead to a synchronization point, e.g. BPE tokenization with transfer to the GPU. The returned value will be saved and return later when calling forward(). """ raise NotImplementedError() def forward(self, inputs: tp.Any) -> ConditionType: """Gets input that should be used as conditioning (e.g, genre, description or a waveform). Outputs a ConditionType, after the input data was embedded as a dense vector. Returns: ConditionType: - A tensor of size [B, T, D] where B is the batch size, T is the length of the output embedding and D is the dimension of the embedding. - And a mask indicating where the padding tokens. """ raise NotImplementedError() class TextConditioner(BaseConditioner): ... class LUTConditioner(TextConditioner): """Lookup table TextConditioner. 
Args: n_bins (int): Number of bins. dim (int): Hidden dim of the model (text-encoder/LUT). output_dim (int): Output dim of the conditioner. tokenizer (str): Name of the tokenizer. pad_idx (int, optional): Index for padding token. Defaults to 0. """ def __init__(self, n_bins: int, dim: int, output_dim: int, tokenizer: str, pad_idx: int = 0): super().__init__(dim, output_dim) self.embed = nn.Embedding(n_bins, dim) self.tokenizer: Tokenizer if tokenizer == 'whitespace': self.tokenizer = WhiteSpaceTokenizer(n_bins, pad_idx=pad_idx) elif tokenizer == 'noop': self.tokenizer = NoopTokenizer(n_bins, pad_idx=pad_idx) else: raise ValueError(f"unrecognized tokenizer `{tokenizer}`.") def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: device = self.embed.weight.device tokens, mask = self.tokenizer(x) tokens, mask = tokens.to(device), mask.to(device) return tokens, mask def forward(self, inputs: tp.Tuple[torch.Tensor, torch.Tensor]) -> ConditionType: tokens, mask = inputs embeds = self.embed(tokens) embeds = self.output_proj(embeds) embeds = (embeds * mask.unsqueeze(-1)) return embeds, mask class T5Conditioner(TextConditioner): """T5-based TextConditioner. Args: name (str): Name of the T5 model. output_dim (int): Output dim of the conditioner. finetune (bool): Whether to fine-tune T5 at train time. device (str): Device for T5 Conditioner. autocast_dtype (tp.Optional[str], optional): Autocast dtype. word_dropout (float, optional): Word dropout probability. normalize_text (bool, optional): Whether to apply text normalization. """ MODELS = ["t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b", "google/flan-t5-small", "google/flan-t5-base", "google/flan-t5-large", "google/flan-t5-xl", "google/flan-t5-xxl"] MODELS_DIMS = { "t5-small": 512, "t5-base": 768, "t5-large": 1024, "t5-3b": 1024, "t5-11b": 1024, "google/flan-t5-small": 512, "google/flan-t5-base": 768, "google/flan-t5-large": 1024, "google/flan-t5-3b": 1024, "google/flan-t5-11b": 1024, } def __init__(self, name: str, output_dim: int, finetune: bool, device: str, autocast_dtype: tp.Optional[str] = 'float32', word_dropout: float = 0., normalize_text: bool = False): assert name in self.MODELS, f"Unrecognized t5 model name (should in {self.MODELS})" super().__init__(self.MODELS_DIMS[name], output_dim) self.device = device self.name = name self.finetune = finetune self.word_dropout = word_dropout if autocast_dtype is None or self.device == 'cpu': self.autocast = TorchAutocast(enabled=False) if self.device != 'cpu': logger.warning("T5 has no autocast, this might lead to NaN") else: dtype = getattr(torch, autocast_dtype) assert isinstance(dtype, torch.dtype) logger.info(f"T5 will be evaluated with autocast as {autocast_dtype}") self.autocast = TorchAutocast(enabled=True, device_type=self.device, dtype=dtype) # Let's disable logging temporarily because T5 will vomit some errors otherwise. 
# thanks https://gist.github.com/simon-weber/7853144 previous_level = logging.root.manager.disable logging.disable(logging.ERROR) with warnings.catch_warnings(): warnings.simplefilter("ignore") try: self.t5_tokenizer = T5Tokenizer.from_pretrained(name) t5 = T5EncoderModel.from_pretrained(name).train(mode=finetune) finally: logging.disable(previous_level) if finetune: self.t5 = t5 else: # this makes sure that the t5 models is not part # of the saved checkpoint self.__dict__['t5'] = t5.to(device) self.normalize_text = normalize_text if normalize_text: self.text_normalizer = WhiteSpaceTokenizer(1, lemma=True, stopwords=True) def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Dict[str, torch.Tensor]: # if current sample doesn't have a certain attribute, replace with empty string entries: tp.List[str] = [xi if xi is not None else "" for xi in x] if self.normalize_text: _, _, entries = self.text_normalizer(entries, return_text=True) if self.word_dropout > 0. and self.training: new_entries = [] for entry in entries: words = [word for word in entry.split(" ") if random.random() >= self.word_dropout] new_entries.append(" ".join(words)) entries = new_entries empty_idx = torch.LongTensor([i for i, xi in enumerate(entries) if xi == ""]) inputs = self.t5_tokenizer(entries, return_tensors='pt', padding=True).to(self.device) mask = inputs['attention_mask'] mask[empty_idx, :] = 0 # zero-out index where the input is non-existant return inputs def forward(self, inputs: tp.Dict[str, torch.Tensor]) -> ConditionType: mask = inputs['attention_mask'] with torch.set_grad_enabled(self.finetune), self.autocast: embeds = self.t5(**inputs).last_hidden_state embeds = self.output_proj(embeds.to(self.output_proj.weight)) embeds = (embeds * mask.unsqueeze(-1)) return embeds, mask class WaveformConditioner(BaseConditioner): """Base class for all conditioners that take a waveform as input. Classes that inherit must implement `_get_wav_embedding` that outputs a continuous tensor, and `_downsampling_factor` that returns the down-sampling factor of the embedding model. Args: dim (int): The internal representation dimension. output_dim (int): Output dimension. device (tp.Union[torch.device, str]): Device. """ def __init__(self, dim: int, output_dim: int, device: tp.Union[torch.device, str]): super().__init__(dim, output_dim) self.device = device # if False no masking is done, used in ChromaStemConditioner when completing by periodicity a sample. self._use_masking = True def tokenize(self, x: WavCondition) -> WavCondition: wav, length, sample_rate, path, seek_time = x assert length is not None return WavCondition(wav.to(self.device), length.to(self.device), sample_rate, path, seek_time) def _get_wav_embedding(self, x: WavCondition) -> torch.Tensor: """Gets as input a WavCondition and returns a dense embedding.""" raise NotImplementedError() def _downsampling_factor(self): """Returns the downsampling factor of the embedding model.""" raise NotImplementedError() def forward(self, x: WavCondition) -> ConditionType: """Extract condition embedding and mask from a waveform and its metadata. Args: x (WavCondition): Waveform condition containing raw waveform and metadata. 
Returns: ConditionType: a dense vector representing the conditioning along with its mask """ wav, lengths, *_ = x with torch.no_grad(): embeds = self._get_wav_embedding(x) embeds = embeds.to(self.output_proj.weight) embeds = self.output_proj(embeds) if lengths is not None and self._use_masking: lengths = lengths / self._downsampling_factor() mask = length_to_mask(lengths, max_len=embeds.shape[1]).int() # type: ignore else: mask = torch.ones_like(embeds[..., 0]) embeds = (embeds * mask.unsqueeze(-1)) return embeds, mask class ChromaStemConditioner(WaveformConditioner): """Chroma conditioner based on stems. The ChromaStemConditioner uses DEMUCS to first filter out drums and bass, as the drums and bass often dominate the chroma leading to the chroma features not containing information about the melody. Args: output_dim (int): Output dimension for the conditioner. sample_rate (int): Sample rate for the chroma extractor. n_chroma (int): Number of chroma bins for the chroma extractor. radix2_exp (int): Size of stft window for the chroma extractor (power of 2, e.g. 12 -> 2^12). duration (int): duration used during training. This is later used for correct padding in case we are using chroma as prefix. match_len_on_eval (bool, optional): if True then all chromas are padded to the training duration. Defaults to False. eval_wavs (str, optional): path to a dataset manifest with waveform, this waveforms are used as conditions during eval (for cases where we don't want to leak test conditions like MusicCaps). Defaults to None. n_eval_wavs (int, optional): limits the number of waveforms used for conditioning. Defaults to 0. device (tp.Union[torch.device, str], optional): Device for the conditioner. **kwargs: Additional parameters for the chroma extractor. """ def __init__(self, output_dim: int, sample_rate: int, n_chroma: int, radix2_exp: int, duration: float, match_len_on_eval: bool = True, eval_wavs: tp.Optional[str] = None, n_eval_wavs: int = 0, cache_path: tp.Optional[tp.Union[str, Path]] = None, device: tp.Union[torch.device, str] = 'cpu', **kwargs): super().__init__(dim=n_chroma, output_dim=output_dim, device=device) self.autocast = TorchAutocast(enabled=device != 'cpu', device_type=self.device, dtype=torch.float32) self.sample_rate = sample_rate self.match_len_on_eval = match_len_on_eval if match_len_on_eval: self._use_masking = False self.duration = duration self.__dict__['demucs'] = pretrained.get_model('htdemucs').to(device) stem_sources: list = self.demucs.sources # type: ignore self.stem_indices = torch.LongTensor([stem_sources.index('vocals'), stem_sources.index('other')]).to(device) self.chroma = ChromaExtractor(sample_rate=sample_rate, n_chroma=n_chroma, radix2_exp=radix2_exp, **kwargs).to(device) self.chroma_len = self._get_chroma_len() self.eval_wavs: tp.Optional[torch.Tensor] = self._load_eval_wavs(eval_wavs, n_eval_wavs) self.cache = None if cache_path is not None: self.cache = EmbeddingCache(Path(cache_path) / 'wav', self.device, compute_embed_fn=self._get_full_chroma_for_cache, extract_embed_fn=self._extract_chroma_chunk) def _downsampling_factor(self) -> int: return self.chroma.winhop def _load_eval_wavs(self, path: tp.Optional[str], num_samples: int) -> tp.Optional[torch.Tensor]: """Load pre-defined waveforms from a json. These waveforms will be used for chroma extraction during evaluation. This is done to make the evaluation on MusicCaps fair (we shouldn't see the chromas of MusicCaps). 
""" if path is None: return None logger.info(f"Loading evaluation wavs from {path}") dataset: AudioDataset = AudioDataset.from_meta( path, segment_duration=self.duration, min_audio_duration=self.duration, sample_rate=self.sample_rate, channels=1) if len(dataset) > 0: eval_wavs = dataset.collater([dataset[i] for i in range(num_samples)]).to(self.device) logger.info(f"Using {len(eval_wavs)} evaluation wavs for chroma-stem conditioner") return eval_wavs else: raise ValueError("Could not find evaluation wavs, check lengths of wavs") def reset_eval_wavs(self, eval_wavs: tp.Optional[torch.Tensor]) -> None: self.eval_wavs = eval_wavs def has_eval_wavs(self) -> bool: return self.eval_wavs is not None def _sample_eval_wavs(self, num_samples: int) -> torch.Tensor: """Sample wavs from a predefined list.""" assert self.eval_wavs is not None, "Cannot sample eval wavs as no eval wavs provided." total_eval_wavs = len(self.eval_wavs) out = self.eval_wavs if num_samples > total_eval_wavs: out = self.eval_wavs.repeat(num_samples // total_eval_wavs + 1, 1, 1) return out[torch.randperm(len(out))][:num_samples] def _get_chroma_len(self) -> int: """Get length of chroma during training.""" dummy_wav = torch.zeros((1, int(self.sample_rate * self.duration)), device=self.device) dummy_chr = self.chroma(dummy_wav) return dummy_chr.shape[1] @torch.no_grad() def _get_stemmed_wav(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor: """Get parts of the wav that holds the melody, extracting the main stems from the wav.""" with self.autocast: wav = convert_audio( wav, sample_rate, self.demucs.samplerate, self.demucs.audio_channels) # type: ignore stems = apply_model(self.demucs, wav, device=self.device) stems = stems[:, self.stem_indices] # extract relevant stems for melody conditioning mix_wav = stems.sum(1) # merge extracted stems to single waveform mix_wav = convert_audio(mix_wav, self.demucs.samplerate, self.sample_rate, 1) # type: ignore return mix_wav @torch.no_grad() def _extract_chroma(self, wav: torch.Tensor) -> torch.Tensor: """Extract chroma features from the waveform.""" with self.autocast: return self.chroma(wav) @torch.no_grad() def _compute_wav_embedding(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor: """Compute wav embedding, applying stem and chroma extraction.""" # avoid 0-size tensors when we are working with null conds if wav.shape[-1] == 1: return self._extract_chroma(wav) stems = self._get_stemmed_wav(wav, sample_rate) chroma = self._extract_chroma(stems) return chroma @torch.no_grad() def _get_full_chroma_for_cache(self, path: tp.Union[str, Path], x: WavCondition, idx: int) -> torch.Tensor: """Extract chroma from the whole audio waveform at the given path.""" wav, sr = audio_read(path) wav = wav[None].to(self.device) wav = convert_audio(wav, sr, self.sample_rate, to_channels=1) chroma = self._compute_wav_embedding(wav, self.sample_rate)[0] return chroma def _extract_chroma_chunk(self, full_chroma: torch.Tensor, x: WavCondition, idx: int) -> torch.Tensor: """Extract a chunk of chroma from the full chroma derived from the full waveform.""" wav_length = x.wav.shape[-1] seek_time = x.seek_time[idx] assert seek_time is not None, ( "WavCondition seek_time is required " "when extracting chroma chunks from pre-computed chroma.") full_chroma = full_chroma.float() frame_rate = self.sample_rate / self._downsampling_factor() target_length = int(frame_rate * wav_length / self.sample_rate) index = int(frame_rate * seek_time) out = full_chroma[index: index + target_length] out = F.pad(out[None], 
(0, 0, 0, target_length - out.shape[0]))[0] return out.to(self.device) @torch.no_grad() def _get_wav_embedding(self, x: WavCondition) -> torch.Tensor: """Get the wav embedding from the WavCondition. The conditioner will either extract the embedding on-the-fly computing it from the condition wav directly or will rely on the embedding cache to load the pre-computed embedding if relevant. """ sampled_wav: tp.Optional[torch.Tensor] = None if not self.training and self.eval_wavs is not None: warn_once(logger, "Using precomputed evaluation wavs!") sampled_wav = self._sample_eval_wavs(len(x.wav)) no_undefined_paths = all(p is not None for p in x.path) no_nullified_cond = x.wav.shape[-1] > 1 if sampled_wav is not None: chroma = self._compute_wav_embedding(sampled_wav, self.sample_rate) elif self.cache is not None and no_undefined_paths and no_nullified_cond: paths = [Path(p) for p in x.path if p is not None] chroma = self.cache.get_embed_from_cache(paths, x) else: assert all(sr == x.sample_rate[0] for sr in x.sample_rate), "All sample rates in batch should be equal." chroma = self._compute_wav_embedding(x.wav, x.sample_rate[0]) if self.match_len_on_eval: B, T, C = chroma.shape if T > self.chroma_len: chroma = chroma[:, :self.chroma_len] logger.debug(f"Chroma was truncated to match length! ({T} -> {chroma.shape[1]})") elif T < self.chroma_len: n_repeat = int(math.ceil(self.chroma_len / T)) chroma = chroma.repeat(1, n_repeat, 1) chroma = chroma[:, :self.chroma_len] logger.debug(f"Chroma was repeated to match length! ({T} -> {chroma.shape[1]})") return chroma def tokenize(self, x: WavCondition) -> WavCondition: """Apply WavConditioner tokenization and populate cache if needed.""" x = super().tokenize(x) no_undefined_paths = all(p is not None for p in x.path) if self.cache is not None and no_undefined_paths: paths = [Path(p) for p in x.path if p is not None] self.cache.populate_embed_cache(paths, x) return x class ChromaChordConditioner(ChromaStemConditioner): """Chord Chroma conditioner based on stems. The ChromaChordConditioner uses DEMUCS to first filter out drums and bass, as the drums and bass often dominate the chroma leading to the chroma features not containing information about the melody. Args: output_dim (int): Output dimension for the conditioner. sample_rate (int): Sample rate for the chroma extractor. n_chroma (int): Number of chroma bins for the chroma extractor. radix2_exp (int): Size of stft window for the chroma extractor (power of 2, e.g. 12 -> 2^12). duration (int): duration used during training. This is later used for correct padding in case we are using chroma as prefix. match_len_on_eval (bool, optional): if True then all chromas are padded to the training duration. Defaults to False. eval_wavs (str, optional): path to a dataset manifest with waveform, this waveforms are used as conditions during eval (for cases where we don't want to leak test conditions like MusicCaps). Defaults to None. n_eval_wavs (int, optional): limits the number of waveforms used for conditioning. Defaults to 0. device (tp.Union[torch.device, str], optional): Device for the conditioner. **kwargs: Additional parameters for the chroma extractor. 
""" def __init__(self, output_dim: int, sample_rate: int, n_chroma: int, radix2_exp: int, duration: float, match_len_on_eval: bool = True, eval_wavs: tp.Optional[str] = None, n_eval_wavs: int = 0, cache_path: tp.Optional[tp.Union[str, Path]] = None, device: tp.Union[torch.device, str] = 'cpu', **kwargs): super().__init__(output_dim = output_dim, sample_rate = sample_rate, n_chroma = n_chroma, radix2_exp = radix2_exp, duration = duration, match_len_on_eval = match_len_on_eval, eval_wavs = eval_wavs, n_eval_wavs = n_eval_wavs, cache_path = cache_path, device = device) self.winhop = self.chroma.winhop self.__dict__['demucs'] = pretrained.get_model('htdemucs').to(device) stem_sources: list = self.demucs.sources self.stem_indices = torch.LongTensor([stem_sources.index('vocals'), stem_sources.index('bass'), stem_sources.index('other')]).to(device) self.chroma_len = self._get_chroma_len() self.bar2chromabin = self.sample_rate / self.winhop self.chroma = ChordExtractor(device = device, sample_rate=sample_rate, n_chroma=n_chroma, max_duration = duration, chroma_len = self.chroma_len, winhop = self.winhop).to(device)
next_line: self.chords = chords.Chords()
gold_snippet_index: 16
created_at: 2023-10-09 09:52:24+00:00
level: 16k

repo_name: RVC-Project/Retrieval-based-Voice-Conversion
file_path: rvc/modules/vc/modules.py
context: [ { "identifier": "Config", "path": "rvc/configs/config.py", "snippet": "class Config:\n def __new__(cls):\n if not hasattr(cls, \"_instance\"):\n cls._instance = super().__new__(cls)\n return cls._instance\n\n def __init__(self):\n self.device: str = \"cuda:0\"\n ...
import_statement:
import logging
import os
import traceback
import numpy as np
import soundfile as sf
import torch
from collections import OrderedDict
from io import BytesIO
from pathlib import Path
from rvc.configs.config import Config
from rvc.lib.audio import load_audio, wav2
from rvc.lib.infer_pack.models import (
    SynthesizerTrnMs256NSFsid,
    SynthesizerTrnMs256NSFsid_nono,
    SynthesizerTrnMs768NSFsid,
    SynthesizerTrnMs768NSFsid_nono,
)
from rvc.modules.vc.pipeline import Pipeline
from rvc.modules.vc.utils import *
token_num: 13,121
cropped_code:
self.net_g = ( self.net_g.half() if self.config.is_half else self.net_g.float() ) self.pipeline = Pipeline(self.tgt_sr, self.config) self.n_spk = self.cpt["config"][-3] index = get_index_path_from_model(sid) logger.info("Select index: " + index) return self.n_spk, return_protect, index def vc_single( self, sid: int, input_audio_path: Path, f0_up_key: int = 0, f0_method: str = "rmvpe", f0_file: Path | None = None, index_file: Path | None = None, index_rate: float = 0.75, filter_radius: int = 3, resample_sr: int = 0, rms_mix_rate: float = 0.25, protect: float = 0.33, hubert_path: str | None = None, ): hubert_path = os.getenv("hubert_path") if not hubert_path else hubert_path try: audio = load_audio(input_audio_path, 16000) audio_max = np.abs(audio).max() / 0.95 if audio_max > 1: audio /= audio_max times = {"npy": 0, "f0": 0, "infer": 0} if self.hubert_model is None: self.hubert_model = load_hubert(self.config, hubert_path) audio_opt = self.pipeline.pipeline( self.hubert_model, self.net_g, sid, audio, input_audio_path, times, f0_up_key, f0_method, index_file, index_rate, self.if_f0, filter_radius, self.tgt_sr, resample_sr, rms_mix_rate, self.version, protect, f0_file, ) tgt_sr = resample_sr if self.tgt_sr != resample_sr >= 16000 else self.tgt_sr return tgt_sr, audio_opt, times, None except Exception: info = traceback.format_exc() logger.warning(info) return None, None, None, info def vc_multi( self, sid: int, paths: list, opt_root: Path, f0_up_key: int = 0, f0_method: str = "rmvpe", f0_file: Path | None = None, index_file: Path | None = None, index_rate: float = 0.75, filter_radius: int = 3, resample_sr: int = 0, rms_mix_rate: float = 0.25, protect: float = 0.33, output_format: str = "wav", hubert_path: str | None = None, ): try: os.makedirs(opt_root, exist_ok=True) paths = [path.name for path in paths] infos = [] for path in paths: tgt_sr, audio_opt, _, info = self.vc_single( sid, Path(path), f0_up_key, f0_method, f0_file, index_file, index_rate, filter_radius, resample_sr, rms_mix_rate, protect, hubert_path, ) if info: try: if output_format in ["wav", "flac"]: sf.write( f"{opt_root}/{os.path.basename(path)}.{output_format}", audio_opt, tgt_sr, ) else: with BytesIO() as wavf: sf.write(wavf, audio_opt, tgt_sr, format="wav") wavf.seek(0, 0) with open( f"{opt_root}/{os.path.basename(path)}.{output_format}", "wb", ) as outf:
all_code:
logger: logging.Logger = logging.getLogger(__name__) class VC: def __init__(self): self.n_spk: any = None self.tgt_sr: int | None = None self.net_g = None self.pipeline: Pipeline | None = None self.cpt: OrderedDict | None = None self.version: str | None = None self.if_f0: int | None = None self.version: str | None = None self.hubert_model: any = None self.config = Config() def get_vc(self, sid: str, *to_return_protect: int): logger.info("Get sid: " + sid) return_protect = [ to_return_protect[0] if self.if_f0 != 0 and to_return_protect else 0.5, to_return_protect[1] if self.if_f0 != 0 and to_return_protect else 0.33, ] person = f'{os.getenv("weight_root")}/{sid}' logger.info(f"Loading: {person}") self.cpt = torch.load(person, map_location="cpu") self.tgt_sr = self.cpt["config"][-1] self.cpt["config"][-3] = self.cpt["weight"]["emb_g.weight"].shape[0] # n_spk self.if_f0 = self.cpt.get("f0", 1) self.version = self.cpt.get("version", "v1") synthesizer_class = { ("v1", 1): SynthesizerTrnMs256NSFsid, ("v1", 0): SynthesizerTrnMs256NSFsid_nono, ("v2", 1): SynthesizerTrnMs768NSFsid, ("v2", 0): SynthesizerTrnMs768NSFsid_nono, } self.net_g = synthesizer_class.get( (self.version, self.if_f0), SynthesizerTrnMs256NSFsid )(*self.cpt["config"], is_half=self.config.is_half) del self.net_g.enc_q if sid == "" or []: logger.info("Clean model cache") del (self.hubert_model, self.tgt_sr, self.net_g) (self.net_g) = self.n_spk = index = None else: self.net_g.load_state_dict(self.cpt["weight"], strict=False) self.net_g.eval().to(self.config.device) self.net_g = ( self.net_g.half() if self.config.is_half else self.net_g.float() ) self.pipeline = Pipeline(self.tgt_sr, self.config) self.n_spk = self.cpt["config"][-3] index = get_index_path_from_model(sid) logger.info("Select index: " + index) return self.n_spk, return_protect, index def vc_single( self, sid: int, input_audio_path: Path, f0_up_key: int = 0, f0_method: str = "rmvpe", f0_file: Path | None = None, index_file: Path | None = None, index_rate: float = 0.75, filter_radius: int = 3, resample_sr: int = 0, rms_mix_rate: float = 0.25, protect: float = 0.33, hubert_path: str | None = None, ): hubert_path = os.getenv("hubert_path") if not hubert_path else hubert_path try: audio = load_audio(input_audio_path, 16000) audio_max = np.abs(audio).max() / 0.95 if audio_max > 1: audio /= audio_max times = {"npy": 0, "f0": 0, "infer": 0} if self.hubert_model is None: self.hubert_model = load_hubert(self.config, hubert_path) audio_opt = self.pipeline.pipeline( self.hubert_model, self.net_g, sid, audio, input_audio_path, times, f0_up_key, f0_method, index_file, index_rate, self.if_f0, filter_radius, self.tgt_sr, resample_sr, rms_mix_rate, self.version, protect, f0_file, ) tgt_sr = resample_sr if self.tgt_sr != resample_sr >= 16000 else self.tgt_sr return tgt_sr, audio_opt, times, None except Exception: info = traceback.format_exc() logger.warning(info) return None, None, None, info def vc_multi( self, sid: int, paths: list, opt_root: Path, f0_up_key: int = 0, f0_method: str = "rmvpe", f0_file: Path | None = None, index_file: Path | None = None, index_rate: float = 0.75, filter_radius: int = 3, resample_sr: int = 0, rms_mix_rate: float = 0.25, protect: float = 0.33, output_format: str = "wav", hubert_path: str | None = None, ): try: os.makedirs(opt_root, exist_ok=True) paths = [path.name for path in paths] infos = [] for path in paths: tgt_sr, audio_opt, _, info = self.vc_single( sid, Path(path), f0_up_key, f0_method, f0_file, index_file, index_rate, filter_radius, 
resample_sr, rms_mix_rate, protect, hubert_path, ) if not info: try: if output_format in ["wav", "flac"]: sf.write( f"{opt_root}/{os.path.basename(path)}.{output_format}", audio_opt, tgt_sr, ) else: with BytesIO() as wavf: sf.write(wavf, audio_opt, tgt_sr, format="wav") wavf.seek(0, 0) with open( f"{opt_root}/{os.path.basename(path)}.{output_format}", "wb", ) as outf:
wav2(wavf, outf, output_format)
2
2023-10-14 09:52:31+00:00
16k
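Each sample above pairs a cropped file prefix (cropped_code, preceded by import_statement) with a single ground-truth continuation line (next_line), while gold_snippet_index points at the supporting snippet inside context. A minimal sketch of scoring such records by exact match; the JSONL path and the complete_fn stub are illustrative placeholders, not part of the dataset:

import json

def score_record(record: dict, complete_fn) -> bool:
    # One plausible prompt construction: the file's imports, then the crop.
    prompt = record["import_statement"] + "\n" + record["cropped_code"]
    generated = complete_fn(prompt)
    first_line = generated.splitlines()[0].strip() if generated.strip() else ""
    return first_line == record["next_line"].strip()

complete_fn = lambda prompt: ""  # stand-in: plug in a real code model here

with open("samples.jsonl") as f:  # hypothetical dump of these records
    records = [json.loads(line) for line in f]
print(sum(score_record(r, complete_fn) for r in records) / len(records))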
zhijie-group/LOVECon
video_diffusion/pipelines/stable_diffusion_controlnet.py
[ { "identifier": "UNetPseudo3DConditionModel", "path": "video_diffusion/models/unet_3d_condition.py", "snippet": "class UNetPseudo3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Opti...
import inspect import os, sys import PIL import torch import numpy as np import json import diffusers import bitsandbytes from dataclasses import dataclass from typing import Callable, List, Optional, Union,Dict,Any from einops import rearrange from tqdm import trange, tqdm from diffusers.utils import is_accelerate_available from packaging import version from transformers import CLIPTextModel, CLIPTokenizer from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL from diffusers.pipeline_utils import DiffusionPipeline from diffusers.schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from diffusers.utils import deprecate, logging from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from ..models.unet_3d_condition import UNetPseudo3DConditionModel from ..models.controlnet_3d_condition import ControlNetPseudo3DModel from video_diffusion.prompt_attention import attention_util from accelerate import cpu_offload
11,273
# code mostly taken from https://github.com/huggingface/diffusers logger = logging.get_logger(__name__) # pylint: disable=invalid-name class SpatioTemporalStableDiffusionControlnetPipeline(DiffusionPipeline): r""" Pipeline for text-to-video generation using Spatio-Temporal Stable Diffusion. """ _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer,
# code mostly taken from https://github.com/huggingface/diffusers logger = logging.get_logger(__name__) # pylint: disable=invalid-name class SpatioTemporalStableDiffusionControlnetPipeline(DiffusionPipeline): r""" Pipeline for text-to-video generation using Spatio-Temporal Stable Diffusion. """ _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer,
unet: UNetPseudo3DConditionModel,
0
2023-10-09 14:38:28+00:00
16k
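The next_line of this record opens the pipeline constructor's parameter list. For readers unfamiliar with the diffusers convention the class follows, here is a minimal sketch of how such a constructor typically continues; the controlnet and scheduler parameters are inferred from the imports above and are an assumption, not the repo's verbatim code:

from diffusers import DiffusionPipeline

class MinimalVideoControlNetPipeline(DiffusionPipeline):
    def __init__(self, vae, text_encoder, tokenizer, unet, controlnet, scheduler):
        super().__init__()
        # register_modules stores each component as an attribute and records
        # its class in model_index.json so the pipeline can later be restored
        # with from_pretrained().
        self.register_modules(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer,
            unet=unet, controlnet=controlnet, scheduler=scheduler,
        )
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)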
mlpc-ucsd/MaskCLIP
train_net.py
[ { "identifier": "add_maskformer2_config", "path": "maskclip/config.py", "snippet": "def add_maskformer2_config(cfg):\n \"\"\"\n Add config for MASK_FORMER.\n \"\"\"\n # NOTE: configs from original maskformer\n # data config\n # select the dataset mapper\n cfg.INPUT.DATASET_MAPPER_NA...
from shapely.errors import ShapelyDeprecationWarning from collections import OrderedDict from typing import Any, Dict, List, Set from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg from detectron2.data import MetadataCatalog, build_detection_train_loader from detectron2.engine import ( DefaultTrainer, default_argument_parser, default_setup, launch, ) from detectron2.evaluation import ( CityscapesInstanceEvaluator, CityscapesSemSegEvaluator, COCOEvaluator, COCOPanopticEvaluator, DatasetEvaluators, LVISEvaluator, SemSegEvaluator, verify_results, ) from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler from detectron2.solver.build import maybe_add_gradient_clipping from detectron2.utils.logger import setup_logger from maskclip import ( COCOInstanceNewBaselineDatasetMapper, COCOPanopticNewBaselineDatasetMapper, InstanceSegEvaluator, MaskFormerInstanceDatasetMapper, MaskFormerPanopticDatasetMapper, MaskFormerSemanticDatasetMapper, SemanticSegmentorWithTTA, add_maskformer2_config, ) import warnings import copy import itertools import logging import os import torch import detectron2.utils.comm as comm
12,665
mapper = None return build_detection_train_loader(cfg, mapper=mapper) @classmethod def build_lr_scheduler(cls, cfg, optimizer): """ It now calls :func:`detectron2.solver.build_lr_scheduler`. Overwrite it if you'd like a different scheduler. """ return build_lr_scheduler(cfg, optimizer) @classmethod def build_optimizer(cls, cfg, model): weight_decay_norm = cfg.SOLVER.WEIGHT_DECAY_NORM weight_decay_embed = cfg.SOLVER.WEIGHT_DECAY_EMBED defaults = {} defaults["lr"] = cfg.SOLVER.BASE_LR defaults["weight_decay"] = cfg.SOLVER.WEIGHT_DECAY norm_module_types = ( torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d, torch.nn.SyncBatchNorm, # NaiveSyncBatchNorm inherits from BatchNorm2d torch.nn.GroupNorm, torch.nn.InstanceNorm1d, torch.nn.InstanceNorm2d, torch.nn.InstanceNorm3d, torch.nn.LayerNorm, torch.nn.LocalResponseNorm, ) params: List[Dict[str, Any]] = [] memo: Set[torch.nn.parameter.Parameter] = set() for module_name, module in model.named_modules(): for module_param_name, value in module.named_parameters(recurse=False): if not value.requires_grad: continue if not 'added_params' in module_name: continue # Avoid duplicating parameters if value in memo: continue memo.add(value) hyperparams = copy.copy(defaults) if "backbone" in module_name: hyperparams["lr"] = hyperparams["lr"] * cfg.SOLVER.BACKBONE_MULTIPLIER if ( "relative_position_bias_table" in module_param_name or "absolute_pos_embed" in module_param_name ): print(module_param_name) hyperparams["weight_decay"] = 0.0 if isinstance(module, norm_module_types): hyperparams["weight_decay"] = weight_decay_norm if isinstance(module, torch.nn.Embedding): hyperparams["weight_decay"] = weight_decay_embed params.append({"params": [value], **hyperparams}) def maybe_add_full_model_gradient_clipping(optim): # detectron2 doesn't have full model gradient clipping now clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE enable = ( cfg.SOLVER.CLIP_GRADIENTS.ENABLED and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model" and clip_norm_val > 0.0 ) class FullModelGradientClippingOptimizer(optim): def step(self, closure=None): all_params = itertools.chain(*[x["params"] for x in self.param_groups]) torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val) super().step(closure=closure) return FullModelGradientClippingOptimizer if enable else optim optimizer_type = cfg.SOLVER.OPTIMIZER if optimizer_type == "SGD": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)( params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM ) elif optimizer_type == "ADAMW": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)( params, cfg.SOLVER.BASE_LR ) else: raise NotImplementedError(f"no optimizer type {optimizer_type}") if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model": optimizer = maybe_add_gradient_clipping(cfg, optimizer) return optimizer @classmethod def test_with_TTA(cls, cfg, model): logger = logging.getLogger("detectron2.trainer") # In the end of training, run an evaluation with TTA. logger.info("Running inference with test-time augmentation ...") model = SemanticSegmentorWithTTA(cfg, model) evaluators = [ cls.build_evaluator( cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, "inference_TTA") ) for name in cfg.DATASETS.TEST ] res = cls.test(cfg, model, evaluators) res = OrderedDict({k + "_TTA": v for k, v in res.items()}) return res def setup(args): """ Create configs and perform basic setups. """ cfg = get_cfg() # for poly lr schedule add_deeplab_config(cfg)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ MaskFormer Training Script. This script is a simplified version of the training script in detectron2/tools. """ try: # ignore ShapelyDeprecationWarning from fvcore warnings.filterwarnings('ignore', category=ShapelyDeprecationWarning) except: pass class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to MaskFormer. """ @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if evaluator_type in ["sem_seg", "ade20k_panoptic_seg"]: evaluator_list.append( SemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, ) ) # instance segmentation if evaluator_type == "coco": evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) # panoptic segmentation if evaluator_type in [ "coco_panoptic_seg", "ade20k_panoptic_seg", "cityscapes_panoptic_seg", "mapillary_vistas_panoptic_seg", ]: if cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON: evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder)) # COCO if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Mapillary Vistas if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Cityscapes if evaluator_type == "cityscapes_instance": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesInstanceEvaluator(dataset_name) if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesSemSegEvaluator(dataset_name) if evaluator_type == "cityscapes_panoptic_seg": if cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." evaluator_list.append(CityscapesSemSegEvaluator(dataset_name)) if cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." 
evaluator_list.append(CityscapesInstanceEvaluator(dataset_name)) # ADE20K if evaluator_type == "ade20k_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) # LVIS if evaluator_type == "lvis": return LVISEvaluator(dataset_name, output_dir=output_folder) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic": mapper = MaskFormerSemanticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Panoptic segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_panoptic": mapper = MaskFormerPanopticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Instance segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_instance": mapper = MaskFormerInstanceDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # coco instance segmentation lsj new baseline elif cfg.INPUT.DATASET_MAPPER_NAME == "coco_instance_lsj": mapper = COCOInstanceNewBaselineDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # coco panoptic segmentation lsj new baseline elif cfg.INPUT.DATASET_MAPPER_NAME == "coco_panoptic_lsj": mapper = COCOPanopticNewBaselineDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) else: mapper = None return build_detection_train_loader(cfg, mapper=mapper) @classmethod def build_lr_scheduler(cls, cfg, optimizer): """ It now calls :func:`detectron2.solver.build_lr_scheduler`. Overwrite it if you'd like a different scheduler. 
""" return build_lr_scheduler(cfg, optimizer) @classmethod def build_optimizer(cls, cfg, model): weight_decay_norm = cfg.SOLVER.WEIGHT_DECAY_NORM weight_decay_embed = cfg.SOLVER.WEIGHT_DECAY_EMBED defaults = {} defaults["lr"] = cfg.SOLVER.BASE_LR defaults["weight_decay"] = cfg.SOLVER.WEIGHT_DECAY norm_module_types = ( torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d, torch.nn.SyncBatchNorm, # NaiveSyncBatchNorm inherits from BatchNorm2d torch.nn.GroupNorm, torch.nn.InstanceNorm1d, torch.nn.InstanceNorm2d, torch.nn.InstanceNorm3d, torch.nn.LayerNorm, torch.nn.LocalResponseNorm, ) params: List[Dict[str, Any]] = [] memo: Set[torch.nn.parameter.Parameter] = set() for module_name, module in model.named_modules(): for module_param_name, value in module.named_parameters(recurse=False): if not value.requires_grad: continue if not 'added_params' in module_name: continue # Avoid duplicating parameters if value in memo: continue memo.add(value) hyperparams = copy.copy(defaults) if "backbone" in module_name: hyperparams["lr"] = hyperparams["lr"] * cfg.SOLVER.BACKBONE_MULTIPLIER if ( "relative_position_bias_table" in module_param_name or "absolute_pos_embed" in module_param_name ): print(module_param_name) hyperparams["weight_decay"] = 0.0 if isinstance(module, norm_module_types): hyperparams["weight_decay"] = weight_decay_norm if isinstance(module, torch.nn.Embedding): hyperparams["weight_decay"] = weight_decay_embed params.append({"params": [value], **hyperparams}) def maybe_add_full_model_gradient_clipping(optim): # detectron2 doesn't have full model gradient clipping now clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE enable = ( cfg.SOLVER.CLIP_GRADIENTS.ENABLED and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model" and clip_norm_val > 0.0 ) class FullModelGradientClippingOptimizer(optim): def step(self, closure=None): all_params = itertools.chain(*[x["params"] for x in self.param_groups]) torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val) super().step(closure=closure) return FullModelGradientClippingOptimizer if enable else optim optimizer_type = cfg.SOLVER.OPTIMIZER if optimizer_type == "SGD": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)( params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM ) elif optimizer_type == "ADAMW": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)( params, cfg.SOLVER.BASE_LR ) else: raise NotImplementedError(f"no optimizer type {optimizer_type}") if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model": optimizer = maybe_add_gradient_clipping(cfg, optimizer) return optimizer @classmethod def test_with_TTA(cls, cfg, model): logger = logging.getLogger("detectron2.trainer") # In the end of training, run an evaluation with TTA. logger.info("Running inference with test-time augmentation ...") model = SemanticSegmentorWithTTA(cfg, model) evaluators = [ cls.build_evaluator( cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, "inference_TTA") ) for name in cfg.DATASETS.TEST ] res = cls.test(cfg, model, evaluators) res = OrderedDict({k + "_TTA": v for k, v in res.items()}) return res def setup(args): """ Create configs and perform basic setups. """ cfg = get_cfg() # for poly lr schedule add_deeplab_config(cfg)
add_maskformer2_config(cfg)
0
2023-10-13 02:32:25+00:00
16k
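The FullModelGradientClippingOptimizer defined inside build_optimizer above is a small, reusable trick: as the code's own comment notes, detectron2 has no full-model clipping hook, so the trainer subclasses the optimizer to clip the global norm across all parameter groups before stepping. A standalone sketch of the same idea using only plain torch:

import itertools
import torch

def with_full_model_clipping(optim_cls, clip_norm: float):
    # Wrap an optimizer class so each step first clips the global gradient
    # norm over *all* param groups, mirroring the trainer's inner class.
    class Clipped(optim_cls):
        def step(self, closure=None):
            all_params = itertools.chain(*[g["params"] for g in self.param_groups])
            torch.nn.utils.clip_grad_norm_(all_params, clip_norm)
            return super().step(closure=closure)
    return Clipped

# Usage sketch:
model = torch.nn.Linear(4, 2)
opt = with_full_model_clipping(torch.optim.AdamW, clip_norm=0.01)(model.parameters(), lr=1e-3)
model(torch.randn(3, 4)).sum().backward()
opt.step()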
mlpc-ucsd/MasQCLIP
train_net.py
[ { "identifier": "add_maskformer2_config", "path": "masqclip/config.py", "snippet": "def add_maskformer2_config(cfg):\n \"\"\"\n Add config for MASK_FORMER.\n \"\"\"\n # NOTE: configs from original maskformer\n # data config\n # select the dataset mapper\n cfg.INPUT.DATASET_MAPPER_NA...
import copy import itertools import logging import os import torch import detectron2.utils.comm as comm import warnings from collections import OrderedDict from typing import Any, Dict, List, Set from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg from detectron2.data import MetadataCatalog, build_detection_train_loader from detectron2.engine import ( DefaultTrainer, default_argument_parser, default_setup, launch, ) from detectron2.evaluation import ( CityscapesInstanceEvaluator, CityscapesSemSegEvaluator, COCOEvaluator, COCOPanopticEvaluator, DatasetEvaluators, LVISEvaluator, SemSegEvaluator, verify_results, ) from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler from detectron2.solver.build import maybe_add_gradient_clipping from detectron2.utils.logger import setup_logger from masqclip import ( COCOInstanceNewBaselineDatasetMapper, COCOPanopticNewBaselineDatasetMapper, InstanceSegEvaluator, MaskFormerInstanceDatasetMapper, MaskFormerPanopticDatasetMapper, MaskFormerSemanticDatasetMapper, SemanticSegmentorWithTTA, add_maskformer2_config, add_masqclip_config, )
11,257
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ MasQCLIP Training Script. """ # MasQCLIP warnings.filterwarnings("ignore") class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to MaskFormer. """ @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if evaluator_type in ["sem_seg", "ade20k_panoptic_seg"]: evaluator_list.append( SemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, ) ) # instance segmentation if evaluator_type == "coco": evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) # panoptic segmentation if evaluator_type in [ "coco_panoptic_seg", "ade20k_panoptic_seg", "cityscapes_panoptic_seg", "mapillary_vistas_panoptic_seg", ]: if cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON: evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder)) # COCO if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Mapillary Vistas if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Cityscapes if evaluator_type == "cityscapes_instance": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesInstanceEvaluator(dataset_name) if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesSemSegEvaluator(dataset_name) if evaluator_type == "cityscapes_panoptic_seg": if cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." evaluator_list.append(CityscapesSemSegEvaluator(dataset_name)) if cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." 
evaluator_list.append(CityscapesInstanceEvaluator(dataset_name)) # ADE20K if evaluator_type == "ade20k_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) # LVIS if evaluator_type == "lvis": return LVISEvaluator(dataset_name, output_dir=output_folder) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic": mapper = MaskFormerSemanticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Panoptic segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_panoptic": mapper = MaskFormerPanopticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Instance segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_instance":
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ MasQCLIP Training Script. """ # MasQCLIP warnings.filterwarnings("ignore") class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to MaskFormer. """ @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if evaluator_type in ["sem_seg", "ade20k_panoptic_seg"]: evaluator_list.append( SemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, ) ) # instance segmentation if evaluator_type == "coco": evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) # panoptic segmentation if evaluator_type in [ "coco_panoptic_seg", "ade20k_panoptic_seg", "cityscapes_panoptic_seg", "mapillary_vistas_panoptic_seg", ]: if cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON: evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder)) # COCO if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Mapillary Vistas if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Cityscapes if evaluator_type == "cityscapes_instance": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesInstanceEvaluator(dataset_name) if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesSemSegEvaluator(dataset_name) if evaluator_type == "cityscapes_panoptic_seg": if cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." evaluator_list.append(CityscapesSemSegEvaluator(dataset_name)) if cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." 
evaluator_list.append(CityscapesInstanceEvaluator(dataset_name)) # ADE20K if evaluator_type == "ade20k_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) # LVIS if evaluator_type == "lvis": return LVISEvaluator(dataset_name, output_dir=output_folder) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic": mapper = MaskFormerSemanticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Panoptic segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_panoptic": mapper = MaskFormerPanopticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Instance segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_instance":
mapper = MaskFormerInstanceDatasetMapper(cfg, True)
4
2023-10-13 02:43:53+00:00
16k
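This MasQCLIP trainer repeats the same if/elif mapper dispatch as the MaskCLIP file earlier in this list. A registry dict is one way to condense that chain; this is a sketch only, assuming the mapper classes imported at the top of the file, not the repo's actual code:

MAPPER_REGISTRY = {
    "mask_former_semantic": MaskFormerSemanticDatasetMapper,
    "mask_former_panoptic": MaskFormerPanopticDatasetMapper,
    "mask_former_instance": MaskFormerInstanceDatasetMapper,
    "coco_instance_lsj": COCOInstanceNewBaselineDatasetMapper,
    "coco_panoptic_lsj": COCOPanopticNewBaselineDatasetMapper,
}

def build_train_loader(cfg):
    mapper_cls = MAPPER_REGISTRY.get(cfg.INPUT.DATASET_MAPPER_NAME)
    mapper = mapper_cls(cfg, True) if mapper_cls is not None else None
    return build_detection_train_loader(cfg, mapper=mapper)

The behavior matches the original chain, including the fall-through to mapper = None for unknown names.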
ielab/llm-rankers
run.py
[ { "identifier": "SearchResult", "path": "rankers/rankers.py", "snippet": "class SearchResult:\n docid: str\n score: float\n text: str" }, { "identifier": "PointwiseLlmRanker", "path": "rankers/pointwise.py", "snippet": "class PointwiseLlmRanker(LlmRanker):\n\n def __init__(se...
import logging import ir_datasets import argparse import sys import json import time import random from pyserini.search.lucene import LuceneSearcher from pyserini.search._base import get_topics from rankers.rankers import SearchResult from rankers.pointwise import PointwiseLlmRanker, MonoT5LlmRanker from rankers.setwise import SetwiseLlmRanker, OpenAiSetwiseLlmRanker from rankers.pairwise import PairwiseLlmRanker, DuoT5LlmRanker, OpenAiPairwiseLlmRanker from rankers.listwise import OpenAiListwiseLlmRanker, ListwiseLlmRanker from tqdm import tqdm
13,913
random.seed(929) logger = logging.getLogger(__name__) def parse_args(parser, commands): # Divide argv by commands split_argv = [[]] for c in sys.argv[1:]: if c in commands.choices: split_argv.append([c]) else: split_argv[-1].append(c) # Initialize namespace args = argparse.Namespace() for c in commands.choices: setattr(args, c, None) # Parse each command parser.parse_args(split_argv[0], namespace=args) # Without command for argv in split_argv[1:]: # Commands n = argparse.Namespace() setattr(args, argv[0], n) parser.parse_args(argv, namespace=n) return args def write_run_file(path, results, tag): with open(path, 'w') as f: for qid, _, ranking in results: rank = 1 for doc in ranking: docid = doc.docid score = doc.score f.write(f"{qid}\tQ0\t{docid}\t{rank}\t{score}\t{tag}\n") rank += 1 def main(args): if args.pointwise: if 'monot5' in args.run.model_name_or_path: ranker = MonoT5LlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pointwise.method, batch_size=args.pointwise.batch_size) else: ranker = PointwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pointwise.method, batch_size=args.pointwise.batch_size) elif args.setwise: if args.run.openai_key: ranker = OpenAiSetwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, api_key=args.run.openai_key, num_child=args.setwise.num_child, method=args.setwise.method, k=args.setwise.k) else: ranker = SetwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, num_child=args.setwise.num_child, scoring=args.run.scoring, method=args.setwise.method, num_permutation=args.setwise.num_permutation, k=args.setwise.k) elif args.pairwise: if args.pairwise.method != 'allpair': args.pairwise.batch_size = 2 logger.info(f'Setting batch_size to 2.') if args.run.openai_key: ranker = OpenAiPairwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, api_key=args.run.openai_key, method=args.pairwise.method, k=args.pairwise.k) elif 'duot5' in args.run.model_name_or_path: ranker = DuoT5LlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pairwise.method, batch_size=args.pairwise.batch_size, k=args.pairwise.k) else: ranker = PairwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pairwise.method, batch_size=args.pairwise.batch_size, k=args.pairwise.k) elif args.listwise: if args.run.openai_key:
random.seed(929) logger = logging.getLogger(__name__) def parse_args(parser, commands): # Divide argv by commands split_argv = [[]] for c in sys.argv[1:]: if c in commands.choices: split_argv.append([c]) else: split_argv[-1].append(c) # Initialize namespace args = argparse.Namespace() for c in commands.choices: setattr(args, c, None) # Parse each command parser.parse_args(split_argv[0], namespace=args) # Without command for argv in split_argv[1:]: # Commands n = argparse.Namespace() setattr(args, argv[0], n) parser.parse_args(argv, namespace=n) return args def write_run_file(path, results, tag): with open(path, 'w') as f: for qid, _, ranking in results: rank = 1 for doc in ranking: docid = doc.docid score = doc.score f.write(f"{qid}\tQ0\t{docid}\t{rank}\t{score}\t{tag}\n") rank += 1 def main(args): if args.pointwise: if 'monot5' in args.run.model_name_or_path: ranker = MonoT5LlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pointwise.method, batch_size=args.pointwise.batch_size) else: ranker = PointwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pointwise.method, batch_size=args.pointwise.batch_size) elif args.setwise: if args.run.openai_key: ranker = OpenAiSetwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, api_key=args.run.openai_key, num_child=args.setwise.num_child, method=args.setwise.method, k=args.setwise.k) else: ranker = SetwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, num_child=args.setwise.num_child, scoring=args.run.scoring, method=args.setwise.method, num_permutation=args.setwise.num_permutation, k=args.setwise.k) elif args.pairwise: if args.pairwise.method != 'allpair': args.pairwise.batch_size = 2 logger.info(f'Setting batch_size to 2.') if args.run.openai_key: ranker = OpenAiPairwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, api_key=args.run.openai_key, method=args.pairwise.method, k=args.pairwise.k) elif 'duot5' in args.run.model_name_or_path: ranker = DuoT5LlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pairwise.method, batch_size=args.pairwise.batch_size, k=args.pairwise.k) else: ranker = PairwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pairwise.method, batch_size=args.pairwise.batch_size, k=args.pairwise.k) elif args.listwise: if args.run.openai_key:
ranker = OpenAiListwiseLlmRanker(model_name_or_path=args.run.model_name_or_path,
8
2023-10-14 01:39:38+00:00
16k
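The write_run_file helper above emits the standard six-column TREC run format, tab-separated: qid, the literal token Q0, docid, 1-based rank, score, and a run tag. A small sketch of the inverse operation, reading such a run back into memory, under the assumption that the file was produced by that helper:

from collections import defaultdict

def read_run_file(path):
    run = defaultdict(list)
    with open(path) as f:
        for line in f:
            qid, _, docid, rank, score, tag = line.split()
            run[qid].append((docid, int(rank), float(score)))
    return run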
amazon-science/tabsyn
baselines/tabddpm/train.py
[ { "identifier": "make_dataset", "path": "utils_train.py", "snippet": "def make_dataset(\n data_path: str,\n T: src.Transformations,\n task_type,\n change_val: bool,\n concat = True,\n):\n\n # classification\n if task_type == 'binclass' or task_type == 'multiclass':\n X_cat = ...
import os import sys import time import torch import numpy as np import pandas as pd import src from copy import deepcopy from utils_train import make_dataset, update_ema from baselines.tabddpm.models.modules import MLPDiffusion from baselines.tabddpm.models.gaussian_multinomial_distribution import GaussianMultinomialDiffusion
12,643
self.ema_model = deepcopy(self.diffusion._denoise_fn) for param in self.ema_model.parameters(): param.detach_() self.train_iter = train_iter self.steps = steps self.init_lr = lr self.optimizer = torch.optim.AdamW(self.diffusion.parameters(), lr=lr, weight_decay=weight_decay) self.device = device self.loss_history = pd.DataFrame(columns=['step', 'mloss', 'gloss', 'loss']) self.model_save_path = model_save_path columns = list(np.arange(5)*200) columns[0] = 1 columns = ['step'] + columns self.log_every = 50 self.print_every = 1 self.ema_every = 1000 def _anneal_lr(self, step): frac_done = step / self.steps lr = self.init_lr * (1 - frac_done) for param_group in self.optimizer.param_groups: param_group["lr"] = lr def _run_step(self, x): x = x.to(self.device) self.optimizer.zero_grad() loss_multi, loss_gauss = self.diffusion.mixed_loss(x) loss = loss_multi + loss_gauss loss.backward() self.optimizer.step() return loss_multi, loss_gauss def run_loop(self): step = 0 curr_loss_multi = 0.0 curr_loss_gauss = 0.0 curr_count = 0 self.print_every = 1 self.log_every = 1 best_loss = np.inf print('Steps: ', self.steps) while step < self.steps: start_time = time.time() x = next(self.train_iter)[0] batch_loss_multi, batch_loss_gauss = self._run_step(x) self._anneal_lr(step) curr_count += len(x) curr_loss_multi += batch_loss_multi.item() * len(x) curr_loss_gauss += batch_loss_gauss.item() * len(x) if (step + 1) % self.log_every == 0: mloss = np.around(curr_loss_multi / curr_count, 4) gloss = np.around(curr_loss_gauss / curr_count, 4) if np.isnan(gloss): print('Finding Nan') break if (step + 1) % self.print_every == 0: print(f'Step {(step + 1)}/{self.steps} MLoss: {mloss} GLoss: {gloss} Sum: {mloss + gloss}') self.loss_history.loc[len(self.loss_history)] =[step + 1, mloss, gloss, mloss + gloss] np.set_printoptions(suppress=True) curr_count = 0 curr_loss_gauss = 0.0 curr_loss_multi = 0.0 if mloss + gloss < best_loss: best_loss = mloss + gloss torch.save(self.diffusion._denoise_fn.state_dict(), os.path.join(self.model_save_path, 'model.pt')) if (step + 1) % 10000 == 0: torch.save(self.diffusion._denoise_fn.state_dict(), os.path.join(self.model_save_path, f'model_{step+1}.pt')) # update_ema(self.ema_model.parameters(), self.diffusion._denoise_fn.parameters()) step += 1 # end_time = time.time() # print('Time: ', end_time - start_time) def train( model_save_path, real_data_path, steps = 1000, lr = 0.002, weight_decay = 1e-4, batch_size = 1024, task_type = 'binclass', model_type = 'mlp', model_params = None, num_timesteps = 1000, gaussian_loss_type = 'mse', scheduler = 'cosine', T_dict = None, num_numerical_features = 0, device = torch.device('cuda:0'), seed = 0, change_val = False ): real_data_path = os.path.normpath(real_data_path) # zero.improve_reproducibility(seed) T = src.Transformations(**T_dict)
def get_model( model_name, model_params, n_num_features, category_sizes ): print(model_name) if model_name == 'mlp': model = MLPDiffusion(**model_params) else: raise "Unknown model!" return model class Trainer: def __init__(self, diffusion, train_iter, lr, weight_decay, steps, model_save_path, device=torch.device('cuda:1')): self.diffusion = diffusion self.ema_model = deepcopy(self.diffusion._denoise_fn) for param in self.ema_model.parameters(): param.detach_() self.train_iter = train_iter self.steps = steps self.init_lr = lr self.optimizer = torch.optim.AdamW(self.diffusion.parameters(), lr=lr, weight_decay=weight_decay) self.device = device self.loss_history = pd.DataFrame(columns=['step', 'mloss', 'gloss', 'loss']) self.model_save_path = model_save_path columns = list(np.arange(5)*200) columns[0] = 1 columns = ['step'] + columns self.log_every = 50 self.print_every = 1 self.ema_every = 1000 def _anneal_lr(self, step): frac_done = step / self.steps lr = self.init_lr * (1 - frac_done) for param_group in self.optimizer.param_groups: param_group["lr"] = lr def _run_step(self, x): x = x.to(self.device) self.optimizer.zero_grad() loss_multi, loss_gauss = self.diffusion.mixed_loss(x) loss = loss_multi + loss_gauss loss.backward() self.optimizer.step() return loss_multi, loss_gauss def run_loop(self): step = 0 curr_loss_multi = 0.0 curr_loss_gauss = 0.0 curr_count = 0 self.print_every = 1 self.log_every = 1 best_loss = np.inf print('Steps: ', self.steps) while step < self.steps: start_time = time.time() x = next(self.train_iter)[0] batch_loss_multi, batch_loss_gauss = self._run_step(x) self._anneal_lr(step) curr_count += len(x) curr_loss_multi += batch_loss_multi.item() * len(x) curr_loss_gauss += batch_loss_gauss.item() * len(x) if (step + 1) % self.log_every == 0: mloss = np.around(curr_loss_multi / curr_count, 4) gloss = np.around(curr_loss_gauss / curr_count, 4) if np.isnan(gloss): print('Finding Nan') break if (step + 1) % self.print_every == 0: print(f'Step {(step + 1)}/{self.steps} MLoss: {mloss} GLoss: {gloss} Sum: {mloss + gloss}') self.loss_history.loc[len(self.loss_history)] =[step + 1, mloss, gloss, mloss + gloss] np.set_printoptions(suppress=True) curr_count = 0 curr_loss_gauss = 0.0 curr_loss_multi = 0.0 if mloss + gloss < best_loss: best_loss = mloss + gloss torch.save(self.diffusion._denoise_fn.state_dict(), os.path.join(self.model_save_path, 'model.pt')) if (step + 1) % 10000 == 0: torch.save(self.diffusion._denoise_fn.state_dict(), os.path.join(self.model_save_path, f'model_{step+1}.pt')) # update_ema(self.ema_model.parameters(), self.diffusion._denoise_fn.parameters()) step += 1 # end_time = time.time() # print('Time: ', end_time - start_time) def train( model_save_path, real_data_path, steps = 1000, lr = 0.002, weight_decay = 1e-4, batch_size = 1024, task_type = 'binclass', model_type = 'mlp', model_params = None, num_timesteps = 1000, gaussian_loss_type = 'mse', scheduler = 'cosine', T_dict = None, num_numerical_features = 0, device = torch.device('cuda:0'), seed = 0, change_val = False ): real_data_path = os.path.normpath(real_data_path) # zero.improve_reproducibility(seed) T = src.Transformations(**T_dict)
dataset = make_dataset(
0
2023-10-10 18:06:31+00:00
16k
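The Trainer's _anneal_lr above is a plain linear decay to zero over the run, lr(step) = init_lr * (1 - step / steps), applied to every param group. A quick standalone check of the schedule's endpoints (a sketch, using this record's defaults of lr = 0.002 and steps = 1000):

def anneal_lr(init_lr: float, step: int, steps: int) -> float:
    return init_lr * (1 - step / steps)

assert anneal_lr(0.002, 0, 1000) == 0.002                 # first step
assert abs(anneal_lr(0.002, 500, 1000) - 0.001) < 1e-15   # halfway
assert anneal_lr(0.002, 1000, 1000) == 0.0                # fully decayed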
ThomasMrY/DisDiff
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n ...
import torch import torch.nn as nn import numpy as np import torch.nn.functional as F import pytorch_lightning as pl import copy import os import pandas as pd from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.distributed import rank_zero_only from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ldm.modules.ema import LitEma from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like from ldm.models.diffusion.ddim import DDIMSampler from ldm.modules.diffusionmodules.util import return_wrap
10,855
if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None,**kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self,cond,batch_size,ddim, ddim_steps,**kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates =ddim_sampler.sample(S = ddim_steps,batch_size = batch_size, shape = shape,conditioning = cond,verbose=False,**kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True,**kwargs) return samples, intermediates @torch.no_grad() def log_images(self, batch, N=8, n_row=8, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, plot_swapped_concepts = False, plot_decoded_xstart=False, plot_swapped_concepts_partial=True, **kwargs): use_ddim = ddim_steps is not None # plot_swapped_concepts = True log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if 
hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption"]: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"]) log["conditioning"] = xc elif self.cond_stage_key == 'class_label': xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) log['conditioning'] = xc
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., ): super().__init__() assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) self.ce_loss = nn.CrossEntropyLoss(reduction = "none") if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. 
- betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) self.register_buffer("shift_coef", - to_torch(np.sqrt(alphas)) * (1. - self.alphas_cumprod_prev) / torch.sqrt(1. - self.alphas_cumprod)) self.register_buffer("ddim_coef", -self.sqrt_one_minus_alphas_cumprod) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") self.load_epoch = sd['epoch'] self.load_step = sd["global_step"] if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) eps_pred = return_wrap(model_out, extract_into_tensor(self.ddim_coef, t, x.shape)) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=eps_pred) elif self.parameterization == "x0": x_recon = eps_pred if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) eps_pred = return_wrap(model_out, extract_into_tensor(self.shift_coef, t, x_start.shape)) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported") loss = self.get_loss(eps_pred, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = 
self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): pass # _, loss_dict_no_ema = self.shared_step(batch) # with self.ema_scope(): # _, loss_dict_ema = self.shared_step(batch) # loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} # self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) # self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, dis_loss_flag = False, detach_flag = False, train_enc_flag = False, dis_weight = 1.0, dis_loss_type = "IM", *args, **kwargs): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__': conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = 
concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key self.dis_loss_flag = dis_loss_flag self.detach_flag = detach_flag self.train_enc_flag = train_enc_flag self.dis_weight = dis_weight self.dis_loss_type = dis_loss_type try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # def on_train_batch_start(self, batch, batch_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if hasattr(self.model.diffusion_model,"scale_factor"): del self.scale_factor self.register_buffer('scale_factor', self.model.diffusion_model.scale_factor) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING Pre-Trained STD-RESCALING ###") else: del self.scale_factor self.register_buffer('scale_factor', 1. 
/ z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append(self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, 
device): weighting = self.delta_border(h, w) weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if self.model.conditioning_key is not None: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox']: xc = batch[cond_key] elif cond_key == 'class_label': xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: if isinstance(xc, dict) or isinstance(xc, list): # import pudb; 
pudb.set_trace() c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] else: c = None xc = None out = [z, c] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [self.first_stage_model.decode(z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1])] else: output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) # same as above but without decorator def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. 
apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [self.first_stage_model.decode(z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1])] else: output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) df = self.split_input_params["vqf"] self.split_input_params['original_image_size'] = x.shape[-2:] bs, nc, h, w = x.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df) z = unfold(x) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) output_list = [self.first_stage_model.encode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization return decoded else: return self.first_stage_model.encode(x) else: return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c) return loss def forward(self, x, c, *args, **kwargs): t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() if self.model.conditioning_key is not None: assert c is not None if self.cond_stage_trainable: c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset def rescale_bbox(bbox): x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) w = min(bbox[2] / crop_coordinates[2], 1 - x0) h = min(bbox[3] / crop_coordinates[3], 1 - y0) return x0, y0, w, h return [rescale_bbox(b) for b in bboxes] def apply_model(self, x_noisy, t, cond, return_ids=False, sampled_concept= None, sampled_index= None): if isinstance(cond, dict): # hybrid case, cond is exptected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 
'concat' else 'c_crossattn' cond = {key: cond} if hasattr(self, "split_input_params"): assert len(cond) == 1 # todo can only deal with one conditioning atm assert not return_ids ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) h, w = x_noisy.shape[-2:] fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride) z = unfold(x_noisy) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] if self.cond_stage_key in ["image", "LR_image", "segmentation", 'bbox_img'] and self.model.conditioning_key: # todo check for completeness c_key = next(iter(cond.keys())) # get key c = next(iter(cond.values())) # get value assert (len(c) == 1) # todo extend to list with more than one elem c = c[0] # get element c = unfold(c) c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L ) cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] elif self.cond_stage_key == 'coordinates_bbox': assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size' # assuming padding of unfold is always 0 and its dilation is always 1 n_patches_per_row = int((w - ks[0]) / stride[0] + 1) full_img_h, full_img_w = self.split_input_params['original_image_size'] # as we are operating on latents, we need the factor from the original image size to the # spatial latent size to properly rescale the crops for regenerating the bbox annotations num_downs = self.first_stage_model.encoder.num_resolutions - 1 rescale_latent = 2 ** (num_downs) # get top left postions of patches as conforming for the bbbox tokenizer, therefore we # need to rescale the tl patch coordinates to be in between (0,1) tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) for patch_nr in range(z.shape[-1])] # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) patch_limits = [(x_tl, y_tl, rescale_latent * ks[0] / full_img_w, rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates] # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] # tokenize crop coordinates for the bounding boxes of the respective patches patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device) for bbox in patch_limits] # list of length l with tensors of shape (1, 2) print(patch_limits_tknzd[0].shape) # cut tknzd crop position from conditioning assert isinstance(cond, dict), 'cond must be dict to be fed into model' cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) print(cut_cond.shape) adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd]) adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') print(adapted_cond.shape) adapted_cond = self.get_learned_conditioning(adapted_cond) print(adapted_cond.shape) adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1]) print(adapted_cond.shape) cond_list = [{'c_crossattn': [e]} for e in adapted_cond] else: cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient # apply model by loop over crops output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])] assert not 
isinstance(output_list[0], tuple) # todo cant deal with multiple model outputs check this never happens o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together x_recon = fold(o) / normalization else: x_recon = self.model(x_noisy, t, sampled_concept = sampled_concept, sampled_index = sampled_index, **cond) # if isinstance(x_recon, tuple) and not return_ids: # return x_recon[0] # else: # return x_recon return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) return mean_flat(kl_prior) / np.log(2.0) @torch.no_grad() def test_step(self, batch, batch_idx): x = super().get_input(batch, self.cond_stage_key) cond = self.cond_stage_model(x) cond = torch.stack(cond.chunk(self.model.diffusion_model.latent_unit, dim = 1), dim=1) return {"cond":cond.detach().cpu()} @torch.no_grad() def test_step_end(self, batch_parts): return batch_parts["cond"] @torch.no_grad() def test_epoch_end(self, test_step_outputs): cond_cat = torch.cat(test_step_outputs, dim=0) cond_dir = os.path.join(self.logdir, "dis_repre","epoch={:06}.npz".format( self.current_epoch)) os.mkdir(os.path.join(self.logdir, "dis_repre")) np.savez(cond_dir, latents=cond_cat.numpy(), num_samples= np.array(self.global_step)) def dis_loss(self, model_forward, x_t, t, cond, sampled_concept): if not self.train_enc_flag: eval_encoder = copy.deepcopy(self.cond_stage_model) eval_encoder.requires_grad_(False) eval_encoder.eval() else: eval_encoder = self.cond_stage_model ddim_coef = extract_into_tensor(self.ddim_coef, t, x_t.shape) with torch.no_grad(): eps_hat = model_forward.pred z_start = self.predict_start_from_noise(x_t, t, eps_hat) pred_x0_t = self.differentiable_decode_first_stage(z_start, force_not_quantize=not self.detach_flag) if self.detach_flag: pred_x0_t = pred_x0_t.detach() else: pass pred_z = eval_encoder(pred_x0_t) z_parts = pred_z.chunk(self.model.diffusion_model.latent_unit, dim=1) pred_z = torch.stack(z_parts, dim=1) eps_new_hat = model_forward.pred + ddim_coef*model_forward.sub_grad z_start_new = self.predict_start_from_noise(x_t, t, eps_new_hat) pred_x0_new_t = self.differentiable_decode_first_stage(z_start_new, force_not_quantize=not self.detach_flag) if self.detach_flag: pred_x0_new_t = pred_x0_new_t.detach() else: pass pred_z_new = eval_encoder(pred_x0_new_t) z_parts = pred_z_new.chunk(self.model.diffusion_model.latent_unit, dim=1) cond = cond.chunk(self.model.diffusion_model.latent_unit, dim=1) pred_z_new = torch.stack(z_parts, dim=1) cond = torch.stack(cond, dim=1) with torch.no_grad(): norm_org = torch.norm(pred_z - cond.detach(), dim=-1) norm_Z = torch.norm(pred_z_new - cond.detach(), dim=-1) logits_deta = torch.norm(pred_z - pred_z_new, dim = -1) logits = norm_org - norm_Z 
dis_loss = self.ce_loss(logits, torch.from_numpy(sampled_concept).cuda()) dis_loss_deta = self.ce_loss(logits_deta, torch.from_numpy(sampled_concept).cuda()) if self.dis_loss_type == "IM": dis_weight = mean_flat((pred_x0_t - pred_x0_new_t.detach())**2) elif self.dis_loss_type == "Z": dis_weight = mean_flat((z_start - z_start_new.detach())**2) else: raise NotImplementedError return dis_weight * self.dis_weight * (dis_loss + dis_loss_deta) def p_losses(self, x_start, cond, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) if self.dis_loss_flag: sampled_concept = np.random.randint(self.model.diffusion_model.latent_unit, size = x_noisy.shape[0]) model_output = self.apply_model(x_noisy, t, cond, sampled_concept = sampled_concept) dis_loss = self.dis_loss(model_output, x_noisy, t, cond, sampled_concept) else: model_output = self.apply_model(x_noisy, t, cond) eps_pred = return_wrap(model_output, extract_into_tensor(self.shift_coef, t, x_start.shape)) loss_dict = {} prefix = 'train' if self.training else 'val' if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise else: raise NotImplementedError() loss_simple = self.get_loss(eps_pred, target, mean=False).mean([1, 2, 3]) loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) logvar_t = self.logvar[t.cpu()].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) loss_dict.update({'logvar': self.logvar.data.mean()}) if self.dis_loss_flag: loss = self.l_simple_weight * loss.mean() + dis_loss.mean() else: loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(eps_pred, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) loss += (self.original_elbo_weight * loss_vlb) loss_dict.update({f'{prefix}/loss': loss}) loss_dict.update({f'{prefix}/epoch_num': self.current_epoch}) loss_dict.update({f'{prefix}/step_num': self.global_step}) return loss, loss_dict def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None): t_in = t model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) eps_pred = return_wrap(model_out,extract_into_tensor(self.ddim_coef, t, x.shape)) if score_corrector is not None: assert self.parameterization == "eps" eps_pred = score_corrector.modify_score(self, eps_pred, x, t, c, **corrector_kwargs) if return_codebook_ids: eps_pred, logits = eps_pred if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=eps_pred) elif self.parameterization == "x0": x_recon = eps_pred else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1., 1.) 
if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if return_codebook_ids: raise DeprecationWarning("Support dropped.") model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) if return_x0: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', total=timesteps) if verbose else reversed( range(0, timesteps)) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. 
- mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None,**kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self,cond,batch_size,ddim, ddim_steps,**kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates =ddim_sampler.sample(S = ddim_steps,batch_size = batch_size, shape = shape,conditioning = cond,verbose=False,**kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True,**kwargs) return samples, intermediates @torch.no_grad() def log_images(self, batch, N=8, n_row=8, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, plot_swapped_concepts = False, plot_decoded_xstart=False, plot_swapped_concepts_partial=True, **kwargs): use_ddim = ddim_steps is not None # plot_swapped_concepts = True log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if 
hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption"]: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"]) log["conditioning"] = xc elif self.cond_stage_key == 'class_label': xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) log['conditioning'] = xc
next_line: elif isimage(xc):
gold_snippet_index: 4
created_at: 2023-10-07 09:58:07+00:00
level: 16k
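Each record in this dump carries the same fields, from repo_name through level. Below is a minimal loading sketch, assuming the dump is serialized as JSONL (one JSON object per line); the file name records.jsonl and the helper name iter_records are illustrative assumptions, not part of the dataset.

```python
import json
from typing import Iterator

def iter_records(path: str) -> Iterator[dict]:
    """Yield one dataset record per line, assuming JSONL serialization."""
    with open(path, encoding="utf-8") as fh:
        for raw in fh:
            raw = raw.strip()
            if raw:
                yield json.loads(raw)

# Usage sketch: print each record's completion target.
for rec in iter_records("records.jsonl"):  # hypothetical file name
    print(rec["repo_name"], rec["file_path"], "->", rec["next_line"])
```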
repo_name: wiio12/LEGO-Prover
file_path: lego_prover/prover.py
context: [ { "identifier": "IsabelleEnv", "path": "lego_prover/env/isa_bridge.py", "snippet": "class IsabelleEnv(gym.Env):\n def __init__(\n self,\n logger=None,\n isabelle_path=\"/Users/wiio/Isabelle2022\",\n working_dir=\"miniF2F\",\n interactive_file=\"miniF2F/interactive.t...
import_statement:
import os
import random
import re
import time
import multiprocessing as mp
import tiktoken
import lego_prover.utils as U
import logging
from lego_prover.env.isa_bridge import IsabelleEnv
from .agents import ActionAgent
from .agents import CurriculumAgent
from .agents import SkillManager
from langchain.schema import HumanMessage
token_num: 11,421
cropped_code:
class Prover:
    def __init__(
        self,
        rank: int = None,
        isabelle_path: str = None,
        server_port: int = 8000,
        model_name: str = "gpt-4",
        temperature: int = 0,
        action_agent_task_max_retries: int = 4,
        curriculum_task_type: str = "simple_curriculum",
        curriculum_agent_lock = U.WithEmpty(),
        skill_manager_lock = U.WithEmpty(),
        chroma_bridge = None,
        openai_api_request_timeout: int = 6000,
        ckpt_dir: str = "ckpt",
        resume: bool = False,
        miniF2F_tasks: mp.Queue = None,
    ):
        """
        Initializes a new instance of the Prover class.

        Args:
            rank (int): The rank of the prover process.
            isabelle_path (str): The path to the Isabelle directory.
            server_port (int): The port number for the server.
            model_name (str): The name of the OpenAI model to use.
            temperature (int): The temperature for sampling the LLM.
            action_agent_task_max_retries (int): The maximum number of retries for an action agent task.
            curriculum_task_type (str): The type of curriculum task to use.
            curriculum_agent_lock: The lock for the curriculum agent.
            skill_manager_lock: The lock for the skill manager.
            chroma_bridge: The ChromaBridge object for controlling the keyboard and mouse.
            openai_api_request_timeout (int): The timeout for OpenAI API requests.
            ckpt_dir (str): The directory for saving checkpoints.
            resume (bool): Whether to resume from the checkpoint.
            miniF2F_tasks (mp.Queue): The queue for miniF2F tasks.
        """
        # init env
        self.rank = rank
        self.logger = logging.getLogger(f'prover-{rank}')
        self.logger.info(f"lego_prover running in rank {rank}")
        self.model_name = model_name
all_code:
class Prover:
    def __init__(
        self,
        rank: int = None,
        isabelle_path: str = None,
        server_port: int = 8000,
        model_name: str = "gpt-4",
        temperature: int = 0,
        action_agent_task_max_retries: int = 4,
        curriculum_task_type: str = "simple_curriculum",
        curriculum_agent_lock = U.WithEmpty(),
        skill_manager_lock = U.WithEmpty(),
        chroma_bridge = None,
        openai_api_request_timeout: int = 6000,
        ckpt_dir: str = "ckpt",
        resume: bool = False,
        miniF2F_tasks: mp.Queue = None,
    ):
        """
        Initializes a new instance of the Prover class.

        Args:
            rank (int): The rank of the prover process.
            isabelle_path (str): The path to the Isabelle directory.
            server_port (int): The port number for the server.
            model_name (str): The name of the OpenAI model to use.
            temperature (int): The temperature for sampling the LLM.
            action_agent_task_max_retries (int): The maximum number of retries for an action agent task.
            curriculum_task_type (str): The type of curriculum task to use.
            curriculum_agent_lock: The lock for the curriculum agent.
            skill_manager_lock: The lock for the skill manager.
            chroma_bridge: The ChromaBridge object for controlling the keyboard and mouse.
            openai_api_request_timeout (int): The timeout for OpenAI API requests.
            ckpt_dir (str): The directory for saving checkpoints.
            resume (bool): Whether to resume from the checkpoint.
            miniF2F_tasks (mp.Queue): The queue for miniF2F tasks.
        """
        # init env
        self.rank = rank
        self.logger = logging.getLogger(f'prover-{rank}')
        self.logger.info(f"lego_prover running in rank {rank}")
        self.model_name = model_name
next_line: self.env = IsabelleEnv(
gold_snippet_index: 0
created_at: 2023-10-09 04:23:43+00:00
level: 16k
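Taken together, cropped_code poses a next-line completion problem and next_line is the gold target (here, `self.env = IsabelleEnv(`). A small scoring sketch follows; the helper name and the whitespace normalization are assumptions of this sketch, not rules stated by the dataset.

```python
def exact_match(prediction: str, gold_next_line: str) -> bool:
    """Compare a predicted completion to the gold next line.

    Both sides are stripped of surrounding whitespace first; this
    normalization is an assumption, not something the dump specifies.
    """
    return prediction.strip() == gold_next_line.strip()

# For the record above, the gold target is "self.env = IsabelleEnv(".
assert exact_match("    self.env = IsabelleEnv(", "self.env = IsabelleEnv(")
```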
repo_name: YingqingHe/ScaleCrafter-ptl
file_path: ldm/models/diffusion/ddpm.py
context: [ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n ...
import_statement:
import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import itertools
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager, nullcontext
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from omegaconf import ListConfig
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler
token_num: 12,763
cropped_code:
if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning(self, batch_size, null_label=None): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: if self.cond_stage_key in ["class_label", "cls"]: xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device) return self.get_learned_conditioning(xc) else: raise NotImplementedError("todo") if isinstance(c, list): # in case the encoder gives us a list for i in range(len(c)): c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device) else: c = repeat(c, '1 ... 
-> b ...', b=batch_size).to(self.device) return c @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True, **kwargs): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25) log["conditioning"] = xc elif self.cond_stage_key in ['class_label', "cls"]: try: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25) log['conditioning'] = xc except KeyError: # probably no "human_label" in batch pass elif isimage(xc): log["conditioning"] = xc
all_code:
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if reset_ema: assert exists(ckpt_path) if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) if reset_ema: assert self.use_ema print(f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
""" mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_v(self, x, noise, t): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x ) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: 
x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, force_null_conditioning=False, *args, **kwargs): self.force_null_conditioning = force_null_conditioning self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if 
conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning: conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) reset_ema = kwargs.pop("reset_ema", False) reset_num_ema_updates = kwargs.pop("reset_num_ema_updates", False) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True if reset_ema: assert self.use_ema print( f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer('scale_factor', 1. 
/ z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append(self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, 
device): weighting = self.delta_border(h, w) weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, return_x=False): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if self.model.conditioning_key is not None and not self.force_null_conditioning: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox', "txt"]: xc = batch[cond_key] elif cond_key in ['class_label', 'cls']: xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: if 
isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_x: out.extend([x]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z return self.first_stage_model.decode(z) @torch.no_grad() def decode_first_stage_tiles(self, z, predict_cids=False, force_not_quantize=False): assert(isinstance(z, (list, tuple))) assert(predict_cids is False) z = [1. / self.scale_factor * z_ for z_ in z] return self.first_stage_model.decode_tiles(z) @torch.no_grad() def encode_first_stage(self, x): return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c) return loss def forward(self, x, c, *args, **kwargs): t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() if self.model.conditioning_key is not None: assert c is not None if self.cond_stage_trainable: c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is expected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} x_recon = self.model(x_noisy, t, **cond) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. 
""" batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, cond, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output = self.apply_model(x_noisy, t, cond) loss_dict = {} prefix = 'train' if self.training else 'val' if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError() loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) loss_dict.update({'logvar': self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) loss += (self.original_elbo_weight * loss_vlb) loss_dict.update({f'{prefix}/loss': loss}) return loss, loss_dict def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None): t_in = t model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if score_corrector is not None: assert self.parameterization == "eps" model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1., 1.) 
if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if return_codebook_ids: raise DeprecationWarning("Support dropped.") model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) if return_x0: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', total=timesteps) if verbose else reversed( range(0, timesteps)) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. 
- mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning(self, batch_size, null_label=None): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: if self.cond_stage_key in ["class_label", "cls"]: xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device) return self.get_learned_conditioning(xc) else: raise NotImplementedError("todo") if isinstance(c, list): # in case the encoder gives us a list for i in range(len(c)): c[i] = repeat(c[i], '1 ... 
-> b ...', b=batch_size).to(self.device) else: c = repeat(c, '1 ... -> b ...', b=batch_size).to(self.device) return c @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True, **kwargs): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25) log["conditioning"] = xc elif self.cond_stage_key in ['class_label', "cls"]: try: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25) log['conditioning'] = xc except KeyError: # probably no "human_label" in batch pass elif isimage(xc): log["conditioning"] = xc
if ismap(xc):
3
2023-10-11 10:57:55+00:00
16k
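The DDPM row above hinges on the closed-form forward process q(x_t | x_0) = N(sqrt(alpha_bar_t) x_0, (1 - alpha_bar_t) I) that q_mean_variance and q_sample implement. Below is a minimal, self-contained sketch of that one-step noising; the linear beta schedule constants and tensor shapes are illustrative assumptions, not values taken from this sample's config.

import torch

timesteps = 1000
betas = torch.linspace(1e-4, 2e-2, timesteps)       # assumed linear schedule
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)  # alpha_bar_t

def q_sample(x_start: torch.Tensor, t: torch.Tensor) -> torch.Tensor:
    """One-step sample x_t ~ q(x_t | x_0), mirroring DDPM.q_sample above."""
    noise = torch.randn_like(x_start)
    # gather alpha_bar_t per batch element and broadcast over the image dims,
    # playing the role of extract_into_tensor in the sample
    a_bar = alphas_cumprod[t].view(-1, 1, 1, 1)
    return a_bar.sqrt() * x_start + (1.0 - a_bar).sqrt() * noise

x0 = torch.randn(4, 3, 32, 32)         # toy batch
t = torch.randint(0, timesteps, (4,))  # random timestep per element
xt = q_sample(x0, t)                   # same shape as x0, noisier as t grows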
bilibini/Lovely_Image_Downloader
dist/py/Python38/site-packages/charset_normalizer/cd.py
[ { "identifier": "FREQUENCIES", "path": "dist/py/Python38/site-packages/charset_normalizer/constant.py", "snippet": "FREQUENCIES: Dict[str, List[str]] = {\n \"English\": [\n \"e\",\n \"a\",\n \"t\",\n \"i\",\n \"o\",\n \"n\",\n \"s\",\n \"r\",\n ...
import importlib
from codecs import IncrementalDecoder
from collections import Counter
from functools import lru_cache
from typing import Counter as TypeCounter, Dict, List, Optional, Tuple

from .constant import (
    FREQUENCIES,
    KO_NAMES,
    LANGUAGE_SUPPORTED_COUNT,
    TOO_SMALL_SEQUENCE,
    ZH_NAMES,
)
from .md import is_suspiciously_successive_range
from .models import CoherenceMatches
from .utils import (
    is_accentuated,
    is_latin,
    is_multi_byte_encoding,
    is_unicode_range_secondary,
    unicode_range,
)
11,643
def alpha_unicode_split(decoded_sequence: str) -> List[str]: """ Given a decoded text sequence, return a list of str. Unicode range / alphabet separation. Ex. a text containing English/Latin with a bit a Hebrew will return two items in the resulting list; One containing the latin letters and the other hebrew. """ layers: Dict[str, str] = {} for character in decoded_sequence: if character.isalpha() is False: continue character_range: Optional[str] = unicode_range(character) if character_range is None: continue layer_target_range: Optional[str] = None for discovered_range in layers: if ( is_suspiciously_successive_range(discovered_range, character_range) is False ): layer_target_range = discovered_range break if layer_target_range is None: layer_target_range = character_range if layer_target_range not in layers: layers[layer_target_range] = character.lower() continue layers[layer_target_range] += character.lower() return list(layers.values()) def merge_coherence_ratios(results: List[CoherenceMatches]) -> CoherenceMatches: """ This function merge results previously given by the function coherence_ratio. The return type is the same as coherence_ratio. """ per_language_ratios: Dict[str, List[float]] = {} for result in results: for sub_result in result: language, ratio = sub_result if language not in per_language_ratios: per_language_ratios[language] = [ratio] continue per_language_ratios[language].append(ratio) merge = [ ( language, round( sum(per_language_ratios[language]) / len(per_language_ratios[language]), 4, ), ) for language in per_language_ratios ] return sorted(merge, key=lambda x: x[1], reverse=True) def filter_alt_coherence_matches(results: CoherenceMatches) -> CoherenceMatches: """ We shall NOT return "English—" in CoherenceMatches because it is an alternative of "English". This function only keeps the best match and remove the em-dash in it. """ index_results: Dict[str, List[float]] = dict() for result in results: language, ratio = result no_em_name: str = language.replace("—", "") if no_em_name not in index_results: index_results[no_em_name] = [] index_results[no_em_name].append(ratio) if any(len(index_results[e]) > 1 for e in index_results): filtered_results: CoherenceMatches = [] for language in index_results: filtered_results.append((language, max(index_results[language]))) return filtered_results return results @lru_cache(maxsize=2048) def coherence_ratio( decoded_sequence: str, threshold: float = 0.1, lg_inclusion: Optional[str] = None ) -> CoherenceMatches: """ Detect ANY language that can be identified in given sequence. The sequence will be analysed by layers. A layer = Character extraction by alphabets/ranges. """ results: List[Tuple[str, float]] = [] ignore_non_latin: bool = False sufficient_match_count: int = 0 lg_inclusion_list = lg_inclusion.split(",") if lg_inclusion is not None else [] if "Latin Based" in lg_inclusion_list: ignore_non_latin = True lg_inclusion_list.remove("Latin Based") for layer in alpha_unicode_split(decoded_sequence): sequence_frequencies: TypeCounter[str] = Counter(layer) most_common = sequence_frequencies.most_common() character_count: int = sum(o for c, o in most_common)
def encoding_unicode_range(iana_name: str) -> List[str]: """ Return associated unicode ranges in a single byte code page. """ if is_multi_byte_encoding(iana_name): raise IOError("Function not supported on multi-byte code page") decoder = importlib.import_module( "encodings.{}".format(iana_name) ).IncrementalDecoder p: IncrementalDecoder = decoder(errors="ignore") seen_ranges: Dict[str, int] = {} character_count: int = 0 for i in range(0x40, 0xFF): chunk: str = p.decode(bytes([i])) if chunk: character_range: Optional[str] = unicode_range(chunk) if character_range is None: continue if is_unicode_range_secondary(character_range) is False: if character_range not in seen_ranges: seen_ranges[character_range] = 0 seen_ranges[character_range] += 1 character_count += 1 return sorted( [ character_range for character_range in seen_ranges if seen_ranges[character_range] / character_count >= 0.15 ] ) def unicode_range_languages(primary_range: str) -> List[str]: """ Return inferred languages used with a unicode range. """ languages: List[str] = [] for language, characters in FREQUENCIES.items(): for character in characters: if unicode_range(character) == primary_range: languages.append(language) break return languages @lru_cache() def encoding_languages(iana_name: str) -> List[str]: """ Single-byte encoding language association. Some code page are heavily linked to particular language(s). This function does the correspondence. """ unicode_ranges: List[str] = encoding_unicode_range(iana_name) primary_range: Optional[str] = None for specified_range in unicode_ranges: if "Latin" not in specified_range: primary_range = specified_range break if primary_range is None: return ["Latin Based"] return unicode_range_languages(primary_range) @lru_cache() def mb_encoding_languages(iana_name: str) -> List[str]: """ Multi-byte encoding language association. Some code page are heavily linked to particular language(s). This function does the correspondence. """ if ( iana_name.startswith("shift_") or iana_name.startswith("iso2022_jp") or iana_name.startswith("euc_j") or iana_name == "cp932" ): return ["Japanese"] if iana_name.startswith("gb") or iana_name in ZH_NAMES: return ["Chinese"] if iana_name.startswith("iso2022_kr") or iana_name in KO_NAMES: return ["Korean"] return [] @lru_cache(maxsize=LANGUAGE_SUPPORTED_COUNT) def get_target_features(language: str) -> Tuple[bool, bool]: """ Determine main aspects from a supported language if it contains accents and if is pure Latin. """ target_have_accents: bool = False target_pure_latin: bool = True for character in FREQUENCIES[language]: if not target_have_accents and is_accentuated(character): target_have_accents = True if target_pure_latin and is_latin(character) is False: target_pure_latin = False return target_have_accents, target_pure_latin def alphabet_languages( characters: List[str], ignore_non_latin: bool = False ) -> List[str]: """ Return associated languages associated to given characters. 
""" languages: List[Tuple[str, float]] = [] source_have_accents = any(is_accentuated(character) for character in characters) for language, language_characters in FREQUENCIES.items(): target_have_accents, target_pure_latin = get_target_features(language) if ignore_non_latin and target_pure_latin is False: continue if target_have_accents is False and source_have_accents: continue character_count: int = len(language_characters) character_match_count: int = len( [c for c in language_characters if c in characters] ) ratio: float = character_match_count / character_count if ratio >= 0.2: languages.append((language, ratio)) languages = sorted(languages, key=lambda x: x[1], reverse=True) return [compatible_language[0] for compatible_language in languages] def characters_popularity_compare( language: str, ordered_characters: List[str] ) -> float: """ Determine if a ordered characters list (by occurrence from most appearance to rarest) match a particular language. The result is a ratio between 0. (absolutely no correspondence) and 1. (near perfect fit). Beware that is function is not strict on the match in order to ease the detection. (Meaning close match is 1.) """ if language not in FREQUENCIES: raise ValueError("{} not available".format(language)) character_approved_count: int = 0 FREQUENCIES_language_set = set(FREQUENCIES[language]) ordered_characters_count: int = len(ordered_characters) target_language_characters_count: int = len(FREQUENCIES[language]) large_alphabet: bool = target_language_characters_count > 26 for character, character_rank in zip( ordered_characters, range(0, ordered_characters_count) ): if character not in FREQUENCIES_language_set: continue character_rank_in_language: int = FREQUENCIES[language].index(character) expected_projection_ratio: float = ( target_language_characters_count / ordered_characters_count ) character_rank_projection: int = int(character_rank * expected_projection_ratio) if ( large_alphabet is False and abs(character_rank_projection - character_rank_in_language) > 4 ): continue if ( large_alphabet is True and abs(character_rank_projection - character_rank_in_language) < target_language_characters_count / 3 ): character_approved_count += 1 continue characters_before_source: List[str] = FREQUENCIES[language][ 0:character_rank_in_language ] characters_after_source: List[str] = FREQUENCIES[language][ character_rank_in_language: ] characters_before: List[str] = ordered_characters[0:character_rank] characters_after: List[str] = ordered_characters[character_rank:] before_match_count: int = len( set(characters_before) & set(characters_before_source) ) after_match_count: int = len( set(characters_after) & set(characters_after_source) ) if len(characters_before_source) == 0 and before_match_count <= 4: character_approved_count += 1 continue if len(characters_after_source) == 0 and after_match_count <= 4: character_approved_count += 1 continue if ( before_match_count / len(characters_before_source) >= 0.4 or after_match_count / len(characters_after_source) >= 0.4 ): character_approved_count += 1 continue return character_approved_count / len(ordered_characters) def alpha_unicode_split(decoded_sequence: str) -> List[str]: """ Given a decoded text sequence, return a list of str. Unicode range / alphabet separation. Ex. a text containing English/Latin with a bit a Hebrew will return two items in the resulting list; One containing the latin letters and the other hebrew. 
""" layers: Dict[str, str] = {} for character in decoded_sequence: if character.isalpha() is False: continue character_range: Optional[str] = unicode_range(character) if character_range is None: continue layer_target_range: Optional[str] = None for discovered_range in layers: if ( is_suspiciously_successive_range(discovered_range, character_range) is False ): layer_target_range = discovered_range break if layer_target_range is None: layer_target_range = character_range if layer_target_range not in layers: layers[layer_target_range] = character.lower() continue layers[layer_target_range] += character.lower() return list(layers.values()) def merge_coherence_ratios(results: List[CoherenceMatches]) -> CoherenceMatches: """ This function merge results previously given by the function coherence_ratio. The return type is the same as coherence_ratio. """ per_language_ratios: Dict[str, List[float]] = {} for result in results: for sub_result in result: language, ratio = sub_result if language not in per_language_ratios: per_language_ratios[language] = [ratio] continue per_language_ratios[language].append(ratio) merge = [ ( language, round( sum(per_language_ratios[language]) / len(per_language_ratios[language]), 4, ), ) for language in per_language_ratios ] return sorted(merge, key=lambda x: x[1], reverse=True) def filter_alt_coherence_matches(results: CoherenceMatches) -> CoherenceMatches: """ We shall NOT return "English—" in CoherenceMatches because it is an alternative of "English". This function only keeps the best match and remove the em-dash in it. """ index_results: Dict[str, List[float]] = dict() for result in results: language, ratio = result no_em_name: str = language.replace("—", "") if no_em_name not in index_results: index_results[no_em_name] = [] index_results[no_em_name].append(ratio) if any(len(index_results[e]) > 1 for e in index_results): filtered_results: CoherenceMatches = [] for language in index_results: filtered_results.append((language, max(index_results[language]))) return filtered_results return results @lru_cache(maxsize=2048) def coherence_ratio( decoded_sequence: str, threshold: float = 0.1, lg_inclusion: Optional[str] = None ) -> CoherenceMatches: """ Detect ANY language that can be identified in given sequence. The sequence will be analysed by layers. A layer = Character extraction by alphabets/ranges. """ results: List[Tuple[str, float]] = [] ignore_non_latin: bool = False sufficient_match_count: int = 0 lg_inclusion_list = lg_inclusion.split(",") if lg_inclusion is not None else [] if "Latin Based" in lg_inclusion_list: ignore_non_latin = True lg_inclusion_list.remove("Latin Based") for layer in alpha_unicode_split(decoded_sequence): sequence_frequencies: TypeCounter[str] = Counter(layer) most_common = sequence_frequencies.most_common() character_count: int = sum(o for c, o in most_common)
if character_count <= TOO_SMALL_SEQUENCE:
3
2023-10-11 09:08:57+00:00
16k
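The charset_normalizer row above merges per-chunk coherence results by pooling each language's ratios, averaging them, and sorting best-first (merge_coherence_ratios). The following is a standalone sketch of that pooling step under made-up sample ratios; the function name and values are illustrative, not part of the library's API.

from typing import Dict, List, Tuple

CoherenceMatches = List[Tuple[str, float]]

def merge_ratios(results: List[CoherenceMatches]) -> CoherenceMatches:
    # pool every ratio reported for a language across chunks
    per_language: Dict[str, List[float]] = {}
    for chunk in results:
        for language, ratio in chunk:
            per_language.setdefault(language, []).append(ratio)
    # average per language, round to 4 places as the sample does, sort best-first
    merged = [(lang, round(sum(r) / len(r), 4)) for lang, r in per_language.items()]
    return sorted(merged, key=lambda item: item[1], reverse=True)

chunks = [[("English", 0.42), ("French", 0.18)], [("English", 0.36)]]
print(merge_ratios(chunks))  # [('English', 0.39), ('French', 0.18)]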
MTgeophysics/mtpy-v2
mtpy/modeling/occam1d/startup.py
[ { "identifier": "Occam1DData", "path": "mtpy/modeling/occam1d/data.py", "snippet": "class Occam1DData(object):\n \"\"\"\n reads and writes occam 1D data files\n\n ===================== =====================================================\n Attributes Description\n ===========...
from pathlib import Path
from mtpy.modeling.occam1d import Occam1DData, Occam1DModel
import time
import numpy as np
12,060
self.data_fn = data_fn self.model_fn = model_fn if self.data_fn is not None: self.save_path = self.data_fn.parent elif self.model_fn is not None: self.save_path = self.model_fn.parent self.startup_fn = None self.rough_type = 1 self.max_iter = 20 self.target_rms = 1 self.start_rho = 100 self.description = "1D_Occam_Inv" self.start_lagrange = 5.0 self.start_rough = 1.0e7 self.debug_level = 1 self.start_iter = 0 self.start_misfit = 100 self.min_max_bounds = None self.model_step = None self._startup_fn = "OccamStartup1D" self._ss = " " * 3 for key, value in kwargs.items(): setattr(self, key, value) @property def data_fn(self): return self._data_fn @data_fn.setter def data_fn(self, fn): if fn is not None: self._data_fn = Path(fn) else: self._data_fn = None @property def model_fn(self): return self._model_fn @model_fn.setter def model_fn(self, fn): if fn is not None: self._model_fn = Path(fn) else: self._model_fn = None def write_startup_file(self, save_path=None, **kwargs): """ Make a 1D input file for Occam 1D Arguments: --------- **savepath** : full path to save input file to, if just path then saved as savepath/input **model_fn** : full path to model file, if None then assumed to be in savepath/model.mod **data_fn** : full path to data file, if None then assumed to be in savepath/TE.dat or TM.dat **rough_type** : roughness type. *default* = 0 **max_iter** : maximum number of iterations. *default* = 20 **target_rms** : target rms value. *default* = 1.0 **start_rho** : starting resistivity value on linear scale. *default* = 100 **description** : description of the inversion. **start_lagrange** : starting Lagrange multiplier for smoothness. *default* = 5 **start_rough** : starting roughness value. *default* = 1E7 **debuglevel** : something to do with how Fortran debuggs the code Almost always leave at *default* = 1 **start_iter** : the starting iteration number, handy if the starting model is from a previous run. *default* = 0 **start_misfit** : starting misfit value. *default* = 100 Returns: -------- **Occam1D.inputfn** : full path to input file. :Example: :: >>> old = occam.Occam1D() >>> old.make1DdataFile('MT01',edipath=r"/home/Line1", >>> savepath=r"/home/Occam1D/Line1/Inv1_TE", >>> mode='TE') >>> Wrote Data File: /home/Occam1D/Line1/Inv1_TE/MT01TE.dat >>> >>> old.make1DModelFile(savepath=r"/home/Occam1D/Line1/Inv1_TE", >>> nlayers=50,bottomlayer=10000,z1layer=50) >>> Wrote Model file: /home/Occam1D/Line1/Inv1_TE/Model1D >>> >>> old.make1DInputFile(rhostart=10,targetrms=1.5,maxiter=15) >>> Wrote Input File: /home/Occam1D/Line1/Inv1_TE/Input1D """ if save_path is not None: self.save_path = save_path if not self.save_path.is_dir(): self.save_path.mkdir() self.startup_fn = self.save_path.joinpath(self._startup_fn) # --> read data file if self.data_fn is None: raise IOError("Need to input data file name.") else:
# -*- coding: utf-8 -*- """ Created on Mon Oct 30 13:32:42 2023 @author: jpeacock """ # ============================================================================= # Imports # ============================================================================= # ============================================================================= class Occam1DStartup(object): """ read and write input files for Occam1D ====================== ==================================================== Attributes Description ====================== ==================================================== _ss string spacing _startup_fn basename of startup file *default* is OccamStartup1D data_fn full path to data file debug_level debug level *default* is 1 description description of inversion for your self *default* is 1D_Occam_Inv max_iter maximum number of iterations *default* is 20 model_fn full path to model file rough_type roughness type *default* is 1 save_path full path to save files to start_iter first iteration number *default* is 0 start_lagrange starting lagrange number on log scale *default* is 5 start_misfit starting misfit value *default* is 100 start_rho starting resistivity value (halfspace) in log scale *default* is 100 start_rough starting roughness (ignored by Occam1D) *default* is 1E7 startup_fn full path to startup file target_rms target rms *default* is 1.0 ====================== ==================================================== """ def __init__(self, data_fn=None, model_fn=None, **kwargs): self.data_fn = data_fn self.model_fn = model_fn if self.data_fn is not None: self.save_path = self.data_fn.parent elif self.model_fn is not None: self.save_path = self.model_fn.parent self.startup_fn = None self.rough_type = 1 self.max_iter = 20 self.target_rms = 1 self.start_rho = 100 self.description = "1D_Occam_Inv" self.start_lagrange = 5.0 self.start_rough = 1.0e7 self.debug_level = 1 self.start_iter = 0 self.start_misfit = 100 self.min_max_bounds = None self.model_step = None self._startup_fn = "OccamStartup1D" self._ss = " " * 3 for key, value in kwargs.items(): setattr(self, key, value) @property def data_fn(self): return self._data_fn @data_fn.setter def data_fn(self, fn): if fn is not None: self._data_fn = Path(fn) else: self._data_fn = None @property def model_fn(self): return self._model_fn @model_fn.setter def model_fn(self, fn): if fn is not None: self._model_fn = Path(fn) else: self._model_fn = None def write_startup_file(self, save_path=None, **kwargs): """ Make a 1D input file for Occam 1D Arguments: --------- **savepath** : full path to save input file to, if just path then saved as savepath/input **model_fn** : full path to model file, if None then assumed to be in savepath/model.mod **data_fn** : full path to data file, if None then assumed to be in savepath/TE.dat or TM.dat **rough_type** : roughness type. *default* = 0 **max_iter** : maximum number of iterations. *default* = 20 **target_rms** : target rms value. *default* = 1.0 **start_rho** : starting resistivity value on linear scale. *default* = 100 **description** : description of the inversion. **start_lagrange** : starting Lagrange multiplier for smoothness. *default* = 5 **start_rough** : starting roughness value. *default* = 1E7 **debuglevel** : something to do with how Fortran debuggs the code Almost always leave at *default* = 1 **start_iter** : the starting iteration number, handy if the starting model is from a previous run. *default* = 0 **start_misfit** : starting misfit value. 
*default* = 100 Returns: -------- **Occam1D.inputfn** : full path to input file. :Example: :: >>> old = occam.Occam1D() >>> old.make1DdataFile('MT01',edipath=r"/home/Line1", >>> savepath=r"/home/Occam1D/Line1/Inv1_TE", >>> mode='TE') >>> Wrote Data File: /home/Occam1D/Line1/Inv1_TE/MT01TE.dat >>> >>> old.make1DModelFile(savepath=r"/home/Occam1D/Line1/Inv1_TE", >>> nlayers=50,bottomlayer=10000,z1layer=50) >>> Wrote Model file: /home/Occam1D/Line1/Inv1_TE/Model1D >>> >>> old.make1DInputFile(rhostart=10,targetrms=1.5,maxiter=15) >>> Wrote Input File: /home/Occam1D/Line1/Inv1_TE/Input1D """ if save_path is not None: self.save_path = save_path if not self.save_path.is_dir(): self.save_path.mkdir() self.startup_fn = self.save_path.joinpath(self._startup_fn) # --> read data file if self.data_fn is None: raise IOError("Need to input data file name.") else:
data = Occam1DData()
0
2023-10-11 22:24:50+00:00
16k
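In the Occam1D row above, data_fn and model_fn are properties whose setters coerce strings to pathlib.Path (or keep None), so save_path can then be derived with .parent. Here is a minimal standalone sketch of that pattern; the class and file names are hypothetical, not from mtpy.

from pathlib import Path

class PathField:
    """Toy holder mirroring the data_fn/model_fn setters in Occam1DStartup."""

    def __init__(self, fn=None):
        self.fn = fn  # routed through the property setter below

    @property
    def fn(self):
        return self._fn

    @fn.setter
    def fn(self, value):
        # strings (or anything path-like) become Path; None stays None
        self._fn = Path(value) if value is not None else None

f = PathField("inv1_te/TE.dat")  # hypothetical data file
print(f.fn.parent)               # inv1_te -> what the class uses as save_path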
Jacoo-ai/HIC-Yolov5
detect.py
[ { "identifier": "attempt_load", "path": "models/experimental.py", "snippet": "def attempt_load(weights, map_location=None, inplace=True, fuse=True):\n from models.yolo import Detect, Model\n\n # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a\n model = Ens...
import argparse
import os
import sys

import cv2
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import onnxruntime
import tensorflow as tf
from pathlib import Path

from models.experimental import attempt_load
from utils.datasets import LoadImages, LoadStreams
from utils.general import apply_classifier, check_img_size, check_imshow, check_requirements, check_suffix, colorstr, \
    increment_path, non_max_suppression, print_args, save_one_box, scale_coords, set_logging, \
    strip_optimizer, xyxy2xywh
from utils.plots import Annotator, colors
from utils.torch_utils import load_classifier, select_device, time_sync
11,855
pred[..., 3] *= imgsz[0] # h pred = torch.tensor(pred) t3 = time_sync() dt[1] += t3 - t2 # NMS pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) dt[2] += time_sync() - t3 # Second-stage classifier (optional) if classify: pred = apply_classifier(pred, modelc, img, im0s) # Process predictions for i, det in enumerate(pred): # per image seen += 1 if webcam: # batch_size >= 1 p, s, im0, frame = path[i], f'{i}: ', im0s[i].copy(), dataset.count else: p, s, im0, frame = path, '', im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # img.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt s += '%gx%g ' % img.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): # Rescale boxes from img_size to im0 size det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round() # Print results for c in det[:, -1].unique(): n = (det[:, -1] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Write results for *xyxy, conf, cls in reversed(det): if save_txt: # Write to file xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(txt_path + '.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') if save_img or save_crop or view_img: # Add bbox to image c = int(cls) # integer class # label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') label = None annotator.box_label(xyxy, label, color=colors(c, True)) if save_crop: save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) # Print time (inference-only) print(f'{s}Done. 
({t3 - t2:.3f}s)') # Stream results im0 = annotator.result() if view_img: cv2.imshow(str(p), im0) cv2.waitKey(1) # 1 millisecond # Save results (image with detections) if save_img: if dataset.mode == 'image': cv2.imwrite(save_path, im0) else: # 'video' or 'stream' if vid_path[i] != save_path: # new video vid_path[i] = save_path if isinstance(vid_writer[i], cv2.VideoWriter): vid_writer[i].release() # release previous video writer if vid_cap: # video fps = vid_cap.get(cv2.CAP_PROP_FPS) w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] save_path += '.mp4' vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer[i].write(im0) # Print results t = tuple(x / seen * 1E3 for x in dt) # speeds per image print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' print(f"Results saved to {colorstr('bold', save_dir)}{s}") if update: strip_optimizer(weights) # update model (to fix SourceChangeWarning) def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--weights', nargs='+', type=str, default='/root/autodl-tmp/best.pt', help='model path(s)') parser.add_argument('--source', type=str, default='/root/autodl-tmp/datasets/VisDrone2019/VisDrone2019-DET-train/images/0000150_01230_d_0000073.jpg', help='file/dir/URL/glob, 0 for webcam') parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold') parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image') parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') parser.add_argument('--view-img', action='store_true', help='show results') parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes') parser.add_argument('--nosave', action='store_true', help='do not save images/videos') parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3') parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') parser.add_argument('--augment', action='store_true', help='augmented inference') parser.add_argument('--visualize', action='store_true', help='visualize features') parser.add_argument('--update', action='store_true', help='update all models') parser.add_argument('--project', default=ROOT / 'runs/detect', help='save results to project/name') parser.add_argument('--name', default='exp', help='save results to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)') parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') opt = parser.parse_args() opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Run inference on images, videos, directories, streams, etc. Usage: $ python path/to/detect.py --source path/to/img.jpg --weights yolov5s.pt --img 640 """ FILE = Path(__file__).resolve() ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative @torch.no_grad() def run(weights=ROOT / 'yolov5m.pt', # model.pt path(s) source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam imgsz=640, # inference size (pixels) conf_thres=0.25, # confidence threshold iou_thres=0.45, # NMS IOU threshold max_det=1000, # maximum detections per image device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu view_img=False, # show results save_txt=False, # save results to *.txt save_conf=False, # save confidences in --save-txt labels save_crop=False, # save cropped prediction boxes nosave=False, # do not save images/videos classes=None, # filter by class: --class 0, or --class 0 2 3 agnostic_nms=False, # class-agnostic NMS augment=False, # augmented inference visualize=False, # visualize features update=False, # update all models project=ROOT / 'runs/detect', # save results to project/name name='exp', # save results to project/name exist_ok=False, # existing project/name ok, do not increment line_thickness=3, # bounding box thickness (pixels) hide_labels=False, # hide labels hide_conf=False, # hide confidences half=False, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference ): source = str(source) save_img = not nosave and not source.endswith('.txt') # save inference images webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( ('rtsp://', 'rtmp://', 'http://', 'https://')) # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Initialize set_logging() device = select_device(device) half &= device.type != 'cpu' # half precision only supported on CUDA # Load model w = str(weights[0] if isinstance(weights, list) else weights) classify, suffix, suffixes = False, Path(w).suffix.lower(), ['.pt', '.onnx', '.tflite', '.pb', ''] check_suffix(w, suffixes) # check weights have acceptable suffix pt, onnx, tflite, pb, saved_model = (suffix == x for x in suffixes) # backend booleans stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults if pt: model = torch.jit.load(w) if 'torchscript' in w else attempt_load(weights, map_location=device) stride = int(model.stride.max()) # model stride names = model.module.names if hasattr(model, 'module') else model.names # get class names if half: model.half() # to FP16 if classify: # second-stage classifier modelc = load_classifier(name='resnet50', n=2) # initialize modelc.load_state_dict(torch.load('resnet50.pt', map_location=device)['model']).to(device).eval() elif onnx: if dnn: # check_requirements(('opencv-python>=4.5.4',)) net = cv2.dnn.readNetFromONNX(w) else: check_requirements(('onnx', 'onnxruntime')) session = onnxruntime.InferenceSession(w, None) else: # TensorFlow models check_requirements(('tensorflow>=2.4.1',)) if pb: # https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt def wrap_frozen_graph(gd, inputs, outputs): x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped import return x.prune(tf.nest.map_structure(x.graph.as_graph_element, inputs), 
tf.nest.map_structure(x.graph.as_graph_element, outputs)) graph_def = tf.Graph().as_graph_def() graph_def.ParseFromString(open(w, 'rb').read()) frozen_func = wrap_frozen_graph(gd=graph_def, inputs="x:0", outputs="Identity:0") elif saved_model: model = tf.keras.models.load_model(w) elif tflite: interpreter = tf.lite.Interpreter(model_path=w) # load TFLite model interpreter.allocate_tensors() # allocate input_details = interpreter.get_input_details() # inputs output_details = interpreter.get_output_details() # outputs int8 = input_details[0]['dtype'] == np.uint8 # is TFLite quantized uint8 model imgsz = check_img_size(imgsz, s=stride) # check image size # Dataloader if webcam: view_img = check_imshow() cudnn.benchmark = True # set True to speed up constant image size inference dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt) bs = len(dataset) # batch_size else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt) bs = 1 # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference if pt and device.type != 'cpu': model(torch.zeros(1, 3, *imgsz).to(device).type_as(next(model.parameters()))) # run once dt, seen = [0.0, 0.0, 0.0], 0 for path, img, im0s, vid_cap in dataset: t1 = time_sync() if onnx: img = img.astype('float32') else: img = torch.from_numpy(img).to(device) img = img.half() if half else img.float() # uint8 to fp16/32 img = img / 255.0 # 0 - 255 to 0.0 - 1.0 if len(img.shape) == 3: img = img[None] # expand for batch dim t2 = time_sync() dt[0] += t2 - t1 # Inference if pt: visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False pred = model(img, augment=augment, visualize=visualize)[0] elif onnx: if dnn: net.setInput(img) pred = torch.tensor(net.forward()) else: pred = torch.tensor(session.run([session.get_outputs()[0].name], {session.get_inputs()[0].name: img})) else: # tensorflow model (tflite, pb, saved_model) imn = img.permute(0, 2, 3, 1).cpu().numpy() # image in numpy if pb: pred = frozen_func(x=tf.constant(imn)).numpy() elif saved_model: pred = model(imn, training=False).numpy() elif tflite: if int8: scale, zero_point = input_details[0]['quantization'] imn = (imn / scale + zero_point).astype(np.uint8) # de-scale interpreter.set_tensor(input_details[0]['index'], imn) interpreter.invoke() pred = interpreter.get_tensor(output_details[0]['index']) if int8: scale, zero_point = output_details[0]['quantization'] pred = (pred.astype(np.float32) - zero_point) * scale # re-scale pred[..., 0] *= imgsz[1] # x pred[..., 1] *= imgsz[0] # y pred[..., 2] *= imgsz[1] # w pred[..., 3] *= imgsz[0] # h pred = torch.tensor(pred) t3 = time_sync() dt[1] += t3 - t2 # NMS pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) dt[2] += time_sync() - t3 # Second-stage classifier (optional) if classify: pred = apply_classifier(pred, modelc, img, im0s) # Process predictions for i, det in enumerate(pred): # per image seen += 1 if webcam: # batch_size >= 1 p, s, im0, frame = path[i], f'{i}: ', im0s[i].copy(), dataset.count else: p, s, im0, frame = path, '', im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # img.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt s += '%gx%g ' % img.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, 
line_width=line_thickness, example=str(names)) if len(det): # Rescale boxes from img_size to im0 size det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round() # Print results for c in det[:, -1].unique(): n = (det[:, -1] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Write results for *xyxy, conf, cls in reversed(det): if save_txt: # Write to file xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(txt_path + '.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') if save_img or save_crop or view_img: # Add bbox to image c = int(cls) # integer class # label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') label = None annotator.box_label(xyxy, label, color=colors(c, True)) if save_crop: save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) # Print time (inference-only) print(f'{s}Done. ({t3 - t2:.3f}s)') # Stream results im0 = annotator.result() if view_img: cv2.imshow(str(p), im0) cv2.waitKey(1) # 1 millisecond # Save results (image with detections) if save_img: if dataset.mode == 'image': cv2.imwrite(save_path, im0) else: # 'video' or 'stream' if vid_path[i] != save_path: # new video vid_path[i] = save_path if isinstance(vid_writer[i], cv2.VideoWriter): vid_writer[i].release() # release previous video writer if vid_cap: # video fps = vid_cap.get(cv2.CAP_PROP_FPS) w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] save_path += '.mp4' vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer[i].write(im0) # Print results t = tuple(x / seen * 1E3 for x in dt) # speeds per image print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' print(f"Results saved to {colorstr('bold', save_dir)}{s}") if update: strip_optimizer(weights) # update model (to fix SourceChangeWarning) def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--weights', nargs='+', type=str, default='/root/autodl-tmp/best.pt', help='model path(s)') parser.add_argument('--source', type=str, default='/root/autodl-tmp/datasets/VisDrone2019/VisDrone2019-DET-train/images/0000150_01230_d_0000073.jpg', help='file/dir/URL/glob, 0 for webcam') parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold') parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image') parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') parser.add_argument('--view-img', action='store_true', help='show results') parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes') parser.add_argument('--nosave', action='store_true', help='do not save images/videos') parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3') parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') parser.add_argument('--augment', action='store_true', help='augmented inference') parser.add_argument('--visualize', action='store_true', help='visualize features') parser.add_argument('--update', action='store_true', help='update all models') parser.add_argument('--project', default=ROOT / 'runs/detect', help='save results to project/name') parser.add_argument('--name', default='exp', help='save results to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)') parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') opt = parser.parse_args() opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand
print_args(FILE.stem, opt)
11
2023-10-12 08:52:01+00:00
16k
OmicsML/scDiff
scdiff/model.py
[ { "identifier": "Decoder", "path": "scdiff/modules/diffusion_model/decoder.py", "snippet": "class Decoder(nn.Module):\n def __init__(self, dim, out_dim, dropout=0., norm_type=\"layernorm\", num_layers=1, cond_num_dict=None,\n cond_emb_dim=None, cond_mask_ratio=0., act=\"gelu\", out_ac...
import warnings import anndata as ad import numpy as np import pandas as pd import torch import torch.nn as nn import torch.nn.functional as F import pytorch_lightning as pl from contextlib import contextmanager from functools import partial from einops.layers.torch import Rearrange from scipy.sparse import csr_matrix from torch.optim.lr_scheduler import LambdaLR from tqdm import tqdm from scdiff.modules.diffusion_model import Decoder, Embedder, Encoder from scdiff.evaluate import ( denoising_eval, evaluate_annotation, perturbation_eval, calculate_batch_r_squared, ) from scdiff.modules.ema import LitEma from scdiff.modules.layers.attention import BasicTransformerBlock from scdiff.modules.layers.basic import FeedForward from scdiff.modules.layers.scmodel import EmbeddingDict from scdiff.utils.diffusion import MaskedEncoderConditioner, timestep_embedding from scdiff.utils.diffusion import make_beta_schedule from scdiff.utils.misc import as_1d_vec, exists, count_params, instantiate_from_config from scdiff.utils.misc import default from scdiff.utils.modules import create_activation, create_norm from scdiff.utils.modules import extract_into_tensor, init_weights, mean_flat, noise_like
11,109
self.cell_mask_ratio = cell_mask_ratio self.feat_mask_ratio = feat_mask_ratio self.mask_context = mask_context self.mask_mode = mask_mode self.mask_strategy = mask_strategy self.mask_value = mask_value self.pad_value = pad_value self.decoder_mask = decoder_mask # -------------------------------------------------------------------------- # -------------------------------------------------------------------------- # MAE encoder specifics activation = create_activation(activation) # self.in_dim = len(input_gene_list) if input_gene_list is not None else len(pretrained_gene_list) self.in_dim = len(pretrained_gene_list) if pretrained_gene_list is not None else len(input_gene_list) self.pretrained_gene_list = pretrained_gene_list self.input_gene_list = input_gene_list pretrained_gene_index = dict(zip(self.pretrained_gene_list, list(range(len(self.pretrained_gene_list))))) self.input_gene_idx = torch.tensor([ pretrained_gene_index[o] for o in self.input_gene_list if o in pretrained_gene_index ]).long() if self.input_gene_list is not None else None assert embed_dim == decoder_embed_dim # XXX: this seems to be required for MAE (see forward dec)? full_embed_dim = embed_dim * cond_tokens self.post_encoder_layer = Rearrange('b (n d) -> b n d', n=cond_tokens, d=embed_dim) self.embedder = Embedder(pretrained_gene_list, full_embed_dim, 'layernorm', dropout=dropout) self.encoder_type = encoder_type if encoder_type == 'attn': self.blocks = nn.ModuleList([ BasicTransformerBlock(full_embed_dim, num_heads, dim_head, self_attn=True, cross_attn=False, dropout=dropout, qkv_bias=True, final_act=activation) for _ in range(depth)]) elif encoder_type in ('mlp', 'mlpparallel'): self.blocks = nn.ModuleList([ nn.Sequential( nn.Linear(full_embed_dim, full_embed_dim), activation, create_norm(norm_layer, full_embed_dim), ) for _ in range(depth)]) elif encoder_type in ('stackffn', 'ffnparallel'): self.blocks = nn.ModuleList([ # FeedForward(full_embed_dim, mult=4, glu=False, dropout=dropout) nn.Sequential( FeedForward(full_embed_dim, mult=4, glu=False, dropout=dropout), create_norm(norm_layer, full_embed_dim), ) for _ in range(depth)]) elif encoder_type == 'none': self.blocks = None else: raise ValueError(f'Unknown encoder type {encoder_type}') # self.encoder_proj = nn.Linear(full_embed_dim, latent_dim) # self.norm = create_norm(norm_layer, full_embed_dim) # -------------------------------------------------------------------------- # -------------------------------------------------------------------------- # MAE decoder specifics self.subset_output = True self.decoder_embed_dim = decoder_embed_dim self.time_embed = nn.Sequential( nn.Linear(decoder_embed_dim, 4 * decoder_embed_dim), nn.SiLU(), nn.Linear(4 * decoder_embed_dim, decoder_embed_dim), ) if mlp_time_embed else nn.Identity() self.no_time_embed = no_time_embed self.cond_type = cond_type assert cond_strategy in ("full_mix", "pre_mix") self.cond_strategy = cond_strategy self.cond_emb_type = cond_emb_type self.cond_tokens = cond_tokens self.cond_cat_input = cond_cat_input if cond_dim is not None or cond_num_dict is not None: if cond_emb_type == 'linear': assert cond_dim is not None self.cond_embed = nn.Sequential( nn.Linear(cond_dim, decoder_embed_dim * cond_tokens), Rearrange('b (n d) -> b n d', n=cond_tokens, d=decoder_embed_dim), ) elif cond_emb_type == 'embedding': assert cond_num_dict is not None self.cond_embed = EmbeddingDict(cond_num_dict, decoder_embed_dim, depth, cond_tokens, mask_ratio=cond_mask_ratio, text_emb=text_emb, text_emb_file=text_emb_file, 
norm_layer=cond_emb_norm, freeze_text_emb=freeze_text_emb, text_proj_type=text_proj_type, text_proj_num_layers=text_proj_num_layers, stackfnn_glu_flag=stackfnn_glu_flag, text_proj_hidden_dim=text_proj_hidden_dim, text_proj_act=text_proj_act, text_proj_norm=text_proj_norm, # text_proj_dropout=dropout, G_go=G_go, # G_go_weight=G_go_weight, num_perts=num_perts, text_proj_dropout=dropout, gears_flag=gears_flag, num_perts=num_perts, gears_hidden_size=gears_hidden_size, gears_mode=gears_mode, gears_mlp_layers=gears_mlp_layers, gears_norm=gears_norm, num_go_gnn_layers=num_go_gnn_layers) elif cond_emb_type == 'none': self.cond_embed = None else: raise ValueError(f"Unknown condition embedder type {cond_emb_type}") else: self.cond_embed = None self.encoder = Encoder(depth, decoder_embed_dim, decoder_num_heads, decoder_dim_head, dropout=dropout, cond_type=cond_type, cond_cat_input=cond_cat_input) # self.mask_token = nn.Parameter(torch.zeros(1, decoder_embed_dim)) self.decoder_embed_type = decoder_embed_type assert decoder_embed_type in ['linear', 'embedder', 'encoder'] if decoder_embed_type == 'linear': self.decoder_embed = nn.Linear(self.in_dim, decoder_embed_dim) elif decoder_embed_type == 'embedder': self.decoder_embed = Embedder(pretrained_gene_list, decoder_embed_dim, 'layernorm', dropout=dropout) elif decoder_embed_type == 'encoder': self.decoder_embed = self.embedder
""" Wild mixture of: https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/facebookresearch/mae/blob/efb2a8062c206524e35e47d04501ed4f544c0ae8 Thank you! """ RESCALE_FACTOR = np.log(1e4) class DiffusionModel(nn.Module): def __init__(self, pretrained_gene_list, input_gene_list=None, dropout=0., cell_mask_ratio=0.75, mask_context=True, encoder_type='stackffn', embed_dim=1024, depth=4, dim_head=64, num_heads=4, feat_mask_ratio=0., decoder_embed_dim=512, decoder_embed_type='linear', decoder_num_heads=4, decoder_dim_head=64, cond_dim=None, cond_tokens=1, cond_type='crossattn', cond_strategy='full_mix', cond_emb_type='linear', cond_num_dict=None, cond_mask_ratio=0.5, cond_cat_input=False, post_cond_num_dict=None, post_cond_layers=2, post_cond_norm='layernorm', post_cond_mask_ratio=0.0, norm_layer='layernorm', mlp_time_embed=False, no_time_embed=False, activation='gelu', mask_strategy='random', mask_mode='v1', mask_dec_cond=False, mask_dec_cond_ratio=False, mask_dec_cond_se=False, mask_dec_cond_semlp=False, mask_dec_cond_concat=False, mask_value=0, pad_value=0, decoder_mask=None, text_emb=None, text_emb_file=None, freeze_text_emb=True, text_proj_type='linear', text_proj_act=None, stackfnn_glu_flag=False, text_proj_hidden_dim=512, text_proj_num_layers=2, text_proj_norm=None, cond_emb_norm=None, num_perts=None, gears_flag=False, gears_hidden_size=64, gears_mode="single", gears_mlp_layers=2, gears_norm=None, num_go_gnn_layers=1): super().__init__() self.depth = depth # -------------------------------------------------------------------------- # MAE masking options self.cell_mask_ratio = cell_mask_ratio self.feat_mask_ratio = feat_mask_ratio self.mask_context = mask_context self.mask_mode = mask_mode self.mask_strategy = mask_strategy self.mask_value = mask_value self.pad_value = pad_value self.decoder_mask = decoder_mask # -------------------------------------------------------------------------- # -------------------------------------------------------------------------- # MAE encoder specifics activation = create_activation(activation) # self.in_dim = len(input_gene_list) if input_gene_list is not None else len(pretrained_gene_list) self.in_dim = len(pretrained_gene_list) if pretrained_gene_list is not None else len(input_gene_list) self.pretrained_gene_list = pretrained_gene_list self.input_gene_list = input_gene_list pretrained_gene_index = dict(zip(self.pretrained_gene_list, list(range(len(self.pretrained_gene_list))))) self.input_gene_idx = torch.tensor([ pretrained_gene_index[o] for o in self.input_gene_list if o in pretrained_gene_index ]).long() if self.input_gene_list is not None else None assert embed_dim == decoder_embed_dim # XXX: this seems to be required for MAE (see forward dec)? 
full_embed_dim = embed_dim * cond_tokens self.post_encoder_layer = Rearrange('b (n d) -> b n d', n=cond_tokens, d=embed_dim) self.embedder = Embedder(pretrained_gene_list, full_embed_dim, 'layernorm', dropout=dropout) self.encoder_type = encoder_type if encoder_type == 'attn': self.blocks = nn.ModuleList([ BasicTransformerBlock(full_embed_dim, num_heads, dim_head, self_attn=True, cross_attn=False, dropout=dropout, qkv_bias=True, final_act=activation) for _ in range(depth)]) elif encoder_type in ('mlp', 'mlpparallel'): self.blocks = nn.ModuleList([ nn.Sequential( nn.Linear(full_embed_dim, full_embed_dim), activation, create_norm(norm_layer, full_embed_dim), ) for _ in range(depth)]) elif encoder_type in ('stackffn', 'ffnparallel'): self.blocks = nn.ModuleList([ # FeedForward(full_embed_dim, mult=4, glu=False, dropout=dropout) nn.Sequential( FeedForward(full_embed_dim, mult=4, glu=False, dropout=dropout), create_norm(norm_layer, full_embed_dim), ) for _ in range(depth)]) elif encoder_type == 'none': self.blocks = None else: raise ValueError(f'Unknown encoder type {encoder_type}') # self.encoder_proj = nn.Linear(full_embed_dim, latent_dim) # self.norm = create_norm(norm_layer, full_embed_dim) # -------------------------------------------------------------------------- # -------------------------------------------------------------------------- # MAE decoder specifics self.subset_output = True self.decoder_embed_dim = decoder_embed_dim self.time_embed = nn.Sequential( nn.Linear(decoder_embed_dim, 4 * decoder_embed_dim), nn.SiLU(), nn.Linear(4 * decoder_embed_dim, decoder_embed_dim), ) if mlp_time_embed else nn.Identity() self.no_time_embed = no_time_embed self.cond_type = cond_type assert cond_strategy in ("full_mix", "pre_mix") self.cond_strategy = cond_strategy self.cond_emb_type = cond_emb_type self.cond_tokens = cond_tokens self.cond_cat_input = cond_cat_input if cond_dim is not None or cond_num_dict is not None: if cond_emb_type == 'linear': assert cond_dim is not None self.cond_embed = nn.Sequential( nn.Linear(cond_dim, decoder_embed_dim * cond_tokens), Rearrange('b (n d) -> b n d', n=cond_tokens, d=decoder_embed_dim), ) elif cond_emb_type == 'embedding': assert cond_num_dict is not None self.cond_embed = EmbeddingDict(cond_num_dict, decoder_embed_dim, depth, cond_tokens, mask_ratio=cond_mask_ratio, text_emb=text_emb, text_emb_file=text_emb_file, norm_layer=cond_emb_norm, freeze_text_emb=freeze_text_emb, text_proj_type=text_proj_type, text_proj_num_layers=text_proj_num_layers, stackfnn_glu_flag=stackfnn_glu_flag, text_proj_hidden_dim=text_proj_hidden_dim, text_proj_act=text_proj_act, text_proj_norm=text_proj_norm, # text_proj_dropout=dropout, G_go=G_go, # G_go_weight=G_go_weight, num_perts=num_perts, text_proj_dropout=dropout, gears_flag=gears_flag, num_perts=num_perts, gears_hidden_size=gears_hidden_size, gears_mode=gears_mode, gears_mlp_layers=gears_mlp_layers, gears_norm=gears_norm, num_go_gnn_layers=num_go_gnn_layers) elif cond_emb_type == 'none': self.cond_embed = None else: raise ValueError(f"Unknown condition embedder type {cond_emb_type}") else: self.cond_embed = None self.encoder = Encoder(depth, decoder_embed_dim, decoder_num_heads, decoder_dim_head, dropout=dropout, cond_type=cond_type, cond_cat_input=cond_cat_input) # self.mask_token = nn.Parameter(torch.zeros(1, decoder_embed_dim)) self.decoder_embed_type = decoder_embed_type assert decoder_embed_type in ['linear', 'embedder', 'encoder'] if decoder_embed_type == 'linear': self.decoder_embed = nn.Linear(self.in_dim,
decoder_embed_dim) elif decoder_embed_type == 'embedder': self.decoder_embed = Embedder(pretrained_gene_list, decoder_embed_dim, 'layernorm', dropout=dropout) elif decoder_embed_type == 'encoder': self.decoder_embed = self.embedder
self.mask_decoder_conditioner = MaskedEncoderConditioner(
11
2023-10-13 14:20:34+00:00
16k
weavel-ai/promptmodel-python
promptmodel/chat_model.py
[ { "identifier": "DevClient", "path": "promptmodel/dev_app.py", "snippet": "class DevClient:\n \"\"\"DevClient main class\"\"\"\n\n def __init__(self):\n self.function_models: List[FunctionModelInterface] = []\n self.chat_models: List[ChatModelInterface] = []\n\n def register(self,...
from dataclasses import dataclass from typing import Any, Dict, List, Optional, Coroutine, Union from uuid import uuid4 from litellm import ModelResponse from promptmodel import DevClient from promptmodel.llms.llm_proxy import LLMProxy from promptmodel.utils import logger from promptmodel.utils.config_utils import ( read_config, upsert_config, check_connection_status_decorator, ) from promptmodel.utils.async_utils import run_async_in_sync from promptmodel.types.response import LLMStreamResponse, LLMResponse, ChatModelConfig from promptmodel.types.enums import InstanceType from promptmodel.types.request import ChatLogRequest from promptmodel.apis.base import AsyncAPIClient import sys
11,515
from __future__ import annotations class RegisteringMeta(type): def __call__(cls, *args, **kwargs): instance: ChatModel = super().__call__(*args, **kwargs) # Find the global client instance in the current context client = cls.find_client_instance() if client is not None: client.register_chat_model(instance.name) return instance @staticmethod def find_client_instance(): # Get the current frame frame = sys._getframe(2) # Get global variables in the current frame global_vars = frame.f_globals # Find an instance of Client among global variables for var_name, var_val in global_vars.items():
from __future__ import annotations class RegisteringMeta(type): def __call__(cls, *args, **kwargs): instance: ChatModel = super().__call__(*args, **kwargs) # Find the global client instance in the current context client = cls.find_client_instance() if client is not None: client.register_chat_model(instance.name) return instance @staticmethod def find_client_instance(): # Get the current frame frame = sys._getframe(2) # Get global variables in the current frame global_vars = frame.f_globals # Find an instance of Client among global variables for var_name, var_val in global_vars.items():
if isinstance(var_val, DevClient):
0
2023-10-09 03:35:44+00:00
16k
cambridgeltl/ClaPS
algs/genetics.py
[ { "identifier": "BaseTrainer", "path": "algs/base_trainer.py", "snippet": "class BaseTrainer(abc.ABC):\n \"\"\"\n The base trainer class.\n\n Attributes:\n obj_func: the callable function handle for model interfacing.\n logger: an optional logger object.\n bn_calibrator: a ...
import random import numpy as np from typing import Any from .base_trainer import BaseTrainer from utils.fsc_datasets import PromptedClassificationDataset from rewards.text_classification_reward import PromptedClassificationReward
11,341
class Genetics: def __init__(self, crossover_tokenizer, vocab_id): self.crossover_tokenizer = crossover_tokenizer self.vocab_id = vocab_id def mutate(self, x, prob=0.1): """ Mutates the input string by replacing tokens with a certain probability. Args: x (str): The input string. prob (float, optional): The probability of replacing each token. Defaults to 0.1. Returns: str: The mutated string. """ x_list = self.crossover_tokenizer.encode(x) def pick_another(x_, candidates): return ( x_ if len(candidates) == 1 else random.choice([v for v in candidates if v != x_]) ) for i, element in enumerate(x_list): if i == 0 or i == len(x_list) - 1: continue if random.random() < prob: x_list[i] = pick_another(element, self.vocab_id) out = self.crossover_tokenizer.decode(x_list, skip_special_tokens=True) return out def crossover(self, x1, x2): """ Performs crossover between two input strings. Args: x1 (str): The first input string. x2 (str): The second input string. Returns: str: The crossover result. """ def _crossover_helper(v1, v2): return v1 if random.random() < 0.5 else v2 def _inbalance_helper(v1, v2): n_tokens = min(len(v1), len(v2)) max_n = max(len(v1), len(v2)) out_token = [] for i in range(n_tokens): out_token.append(v1[i] if random.random() < 0.5 else v2[i]) for i in range(n_tokens, max_n): out_token.append(v1[i] if len(v1) > n_tokens else v2[i]) return out_token x1_tokens = self.crossover_tokenizer.encode(x1) x2_tokens = self.crossover_tokenizer.encode(x2) x = _crossover_helper(x1_tokens, x2_tokens) ret = self.crossover_tokenizer.decode(x, skip_special_tokens=True) return ret def random_string(self, length=5): """ Generates a random string of a specified length. Args: length (int, optional): The length of the random string. Defaults to 5. Returns: str: The random string. """ choices = self.vocab_id out = random.choices(choices, k=length) out = self.crossover_tokenizer.decode(out, skip_special_tokens=True) return out def random_extend_pop(self, pop: list, n: int) -> list: """ Extends the population with random strings. Args: pop (list): The population. n (int): The number of random strings to generate. Returns: list: The extended population. """ pop = [p + self.random_string(n) for p in pop] return pop
class Genetics: def __init__(self, crossover_tokenizer, vocab_id): self.crossover_tokenizer = crossover_tokenizer self.vocab_id = vocab_id def mutate(self, x, prob=0.1): """ Mutates the input string by replacing tokens with a certain probability. Args: x (str): The input string. prob (float, optional): The probability of replacing each token. Defaults to 0.1. Returns: str: The mutated string. """ x_list = self.crossover_tokenizer.encode(x) def pick_another(x_, candidates): return ( x_ if len(candidates) == 1 else random.choice([v for v in candidates if v != x_]) ) for i, element in enumerate(x_list): if i == 0 or i == len(x_list) - 1: continue if random.random() < prob: x_list[i] = pick_another(element, self.vocab_id) out = self.crossover_tokenizer.decode(x_list, skip_special_tokens=True) return out def crossover(self, x1, x2): """ Performs crossover between two input strings. Args: x1 (str): The first input string. x2 (str): The second input string. Returns: str: The crossover result. """ def _crossover_helper(v1, v2): return v1 if random.random() < 0.5 else v2 def _inbalance_helper(v1, v2): n_tokens = min(len(v1), len(v2)) max_n = max(len(v1), len(v2)) out_token = [] for i in range(n_tokens): out_token.append(v1[i] if random.random() < 0.5 else v2[i]) for i in range(n_tokens, max_n): out_token.append(v1[i] if len(v1) > n_tokens else v2[i]) return out_token x1_tokens = self.crossover_tokenizer.encode(x1) x2_tokens = self.crossover_tokenizer.encode(x2) x = _crossover_helper(x1_tokens, x2_tokens) ret = self.crossover_tokenizer.decode(x, skip_special_tokens=True) return ret def random_string(self, length=5): """ Generates a random string of a specified length. Args: length (int, optional): The length of the random string. Defaults to 5. Returns: str: The random string. """ choices = self.vocab_id out = random.choices(choices, k=length) out = self.crossover_tokenizer.decode(out, skip_special_tokens=True) return out def random_extend_pop(self, pop: list, n: int) -> list: """ Extends the population with random strings. Args: pop (list): The population. n (int): The number of random strings to generate. Returns: list: The extended population. """ pop = [p + self.random_string(n) for p in pop] return pop
class GeneticAlgorithmTrainer(BaseTrainer):
0
2023-10-08 12:39:44+00:00
16k
clessig/atmorep
atmorep/core/trainer.py
[ { "identifier": "AtmoRep", "path": "atmorep/core/atmorep_model.py", "snippet": "class AtmoRep( torch.nn.Module) :\n\n def __init__(self, cf) :\n '''Constructor'''\n \n super( AtmoRep, self).__init__()\n\n self.cf = cf\n\n ###################################################\n def create( self...
import torch import torchinfo import numpy as np import code import os import datetime import functools import pandas as pd import wandb import torch.distributed as dist import torch.utils.data.distributed import atmorep.config.config as config import atmorep.utils.token_infos_transformations as token_infos_transformations import atmorep.utils.utils as utils from pathlib import Path from typing import TypeVar from torch.distributed.optim import ZeroRedundancyOptimizer from atmorep.core.atmorep_model import AtmoRep from atmorep.core.atmorep_model import AtmoRepData from atmorep.training.bert import prepare_batch_BERT_multifield from atmorep.transformer.transformer_base import positional_encoding_harmonic from atmorep.utils.utils import shape_to_str from atmorep.utils.utils import relMSELoss from atmorep.utils.utils import Gaussian from atmorep.utils.utils import CRPS from atmorep.utils.utils import NetMode from atmorep.utils.utils import sgn_exp from atmorep.datasets.data_writer import write_forecast, write_BERT, write_attention
13,635
if 0 == cf.par_rank : print( 'validation loss for strategy={} at epoch {} : {}'.format( BERT_test_strategy, epoch, total_loss), flush=True) if cf.with_wandb and (0 == cf.par_rank) : loss_dict = {"val. loss {}".format(BERT_test_strategy) : total_loss} total_losses = total_losses.cpu().detach() for i, field in enumerate(cf.fields_prediction) : idx_name = 'val., {}, '.format(BERT_test_strategy) + field[0] loss_dict[idx_name] = total_losses[i] print( 'validation loss for {} : {}'.format( field[0], total_losses[i] )) wandb.log( loss_dict) batch_data = [] torch.cuda.empty_cache() cf.BERT_strategy = BERT_strategy_train self.mode_test = False return total_loss ################################################### def evaluate( self, data_idx = 0, log = True): cf = self.cf self.model.mode( NetMode.test) log_sources = [] test_len = 0 # evaluate loss = torch.tensor( 0.) with torch.no_grad() : for it in range( self.model.len( NetMode.test)) : batch_data = self.model.next() if cf.par_rank < cf.log_test_num_ranks : # keep on cpu since it will otherwise clog up GPU memory (sources, token_infos, targets, tmis, tmis_list) = batch_data[0] # targets if len(batch_data[1]) > 0 : targets = [] for target_field in batch_data[1] : targets.append(torch.cat([target_vl[0].unsqueeze(1) for target_vl in target_field],1)) # store on cpu log_sources = ( [source.detach().clone().cpu() for source in sources ], [ti.detach().clone().cpu() for ti in token_infos], [target.detach().clone().cpu() for target in targets ], tmis, tmis_list ) batch_data = self.prepare_batch( batch_data) preds, atts = self.model( batch_data) ifield = 0 for pred, idx in zip( preds, self.fields_prediction_idx) : target = self.targets[idx] cur_loss = self.MSELoss( pred[0], target = target ).cpu() loss += cur_loss ifield += 1 test_len += 1 # logging if cf.par_rank < cf.log_test_num_ranks : self.log_validate( data_idx, it, log_sources, preds) if cf.attention: self.log_attention( data_idx , it, [atts, [ti.detach().clone().cpu() for ti in token_infos]]) # average over all nodes loss /= test_len * len(self.cf.fields_prediction) if cf.with_ddp : loss_cuda = loss.cuda() dist.all_reduce( loss_cuda, op=torch.distributed.ReduceOp.AVG ) loss = loss_cuda.cpu() if 0 == cf.par_rank : print( 'Loss {}'.format( loss)) ################################################### def test_loss( self, pred, target) : '''Hook for custom test loss''' pass ################################################### def loss( self, preds, batch_idx = 0) : # TODO: move implementations to individual files cf = self.cf mse_loss_total = torch.tensor( 0.,) losses = dict(zip(cf.losses,[[] for loss in cf.losses ])) for pred, idx in zip( preds, self.fields_prediction_idx) : target = self.targets[idx] mse_loss = self.MSELoss( pred[0], target = target) mse_loss_total += mse_loss.cpu().detach() # MSE loss if 'mse' in self.cf.losses : losses['mse'].append( mse_loss) # MSE ensemble loss if 'mse_ensemble' in self.cf.losses : loss_en = torch.tensor( 0., device=target.device) for en in torch.transpose( pred[2], 1, 0) : loss_en += self.MSELoss( en, target = target) # losses['mse_ensemble'].append( 50. * loss_en / pred[2].shape[1]) losses['mse_ensemble'].append( loss_en / pred[2].shape[1]) # Generalized cross entropy loss for continuous distributions if 'stats' in self.cf.losses :
#################################################################################################### # # Copyright (C) 2022 # #################################################################################################### # # project : atmorep # # author : atmorep collaboration # # description : # # license : # #################################################################################################### # code.interact(local=locals()) # import horovod.torch as hvd #################################################################################################### class Trainer_Base() : def __init__( self, cf, devices ) : self.cf = cf self.devices = devices self.device_in = devices[0] self.device_out = devices[-1] self.fields_prediction_idx = [] self.loss_weights = torch.zeros( len(cf.fields_prediction) ) for ifield, field in enumerate(cf.fields_prediction) : self.loss_weights[ifield] = self.cf.fields_prediction[ifield][1] for idx, field_info in enumerate(cf.fields) : if field_info[0] == field[0] : self.fields_prediction_idx.append( idx) break self.loss_weights = self.loss_weights.to( self.device_out) self.MSELoss = torch.nn.MSELoss() # transformation for token infos if hasattr( cf, 'token_infos_transformation') : self.tok_infos_trans = getattr( token_infos_transformations, cf.token_infos_transformation) else : self.tok_infos_trans = getattr( token_infos_transformations, 'identity') if 0 == cf.par_rank : directory = Path( config.path_results, 'id{}'.format( cf.wandb_id)) if not os.path.exists(directory): os.makedirs( directory) directory = Path( config.path_models, 'id{}'.format( cf.wandb_id)) if not os.path.exists(directory): os.makedirs( directory) ################################################### def create( self, load_embeds=True) : net = AtmoRep( self.cf) self.model = AtmoRepData( net) self.model.create( self.pre_batch, self.devices, load_embeds) # TODO: pass these properly to model / net self.model.net.encoder_to_decoder = self.encoder_to_decoder self.model.net.decoder_to_tail = self.decoder_to_tail return self ################################################### @classmethod def load( Typename, cf, model_id, epoch, devices) : trainer = Typename( cf, devices).create( load_embeds=False) trainer.model.net = trainer.model.net.load( model_id, devices, cf, epoch) # TODO: pass these properly to model / net trainer.model.net.encoder_to_decoder = trainer.encoder_to_decoder trainer.model.net.decoder_to_tail = trainer.decoder_to_tail str = 'Loaded model id = {}{}.'.format( model_id, f' at epoch = {epoch}' if epoch> -2 else '') print( str) return trainer ################################################### def save( self, epoch) : self.model.net.save( epoch) ################################################### def get_learn_rates( self) : cf = self.cf size_padding = 5 learn_rates = np.zeros( cf.num_epochs + size_padding) learn_rates[:cf.lr_start_epochs] = np.linspace( cf.lr_start, cf.lr_max, num = cf.lr_start_epochs) lr = learn_rates[cf.lr_start_epochs-1] ic = 0 for epoch in range( cf.lr_start_epochs, cf.num_epochs + size_padding) : lr = max( lr / cf.lr_decay_rate, cf.lr_min) learn_rates[epoch] = lr if ic > 9999 : # sanity check (assert on a non-empty string is always true, so raise explicitly) assert False, "Maximum number of epochs exceeded."
return learn_rates ################################################### def run( self, epoch = -1) : cf = self.cf model = self.model learn_rates = self.get_learn_rates() if cf.with_ddp : self.model_ddp = torch.nn.parallel.DistributedDataParallel( model, static_graph=True) if not cf.optimizer_zero : self.optimizer = torch.optim.AdamW( self.model_ddp.parameters(), lr=cf.lr_start, weight_decay=cf.weight_decay) else : self.optimizer = ZeroRedundancyOptimizer(self.model_ddp.parameters(), optimizer_class=torch.optim.AdamW, lr=cf.lr_start ) else : self.optimizer = torch.optim.AdamW( self.model.parameters(), lr=cf.lr_start, weight_decay=cf.weight_decay) if 0 == cf.par_rank : # print( self.model.net) model_parameters = filter(lambda p: p.requires_grad, self.model_ddp.parameters()) num_params = sum([np.prod(p.size()) for p in model_parameters]) print( f'Number of trainable parameters: {num_params:,}') # test at the beginning as reference self.model.load_data( NetMode.test, batch_size=cf.batch_size_test) if cf.test_initial : cur_test_loss = self.validate( epoch, cf.BERT_strategy).cpu().numpy() test_loss = np.array( [cur_test_loss]) else : # generic value based on data normalization test_loss = np.array( [1.0]) epoch += 1 batch_size = cf.batch_size_start - cf.batch_size_delta if cf.profile : lr = learn_rates[epoch] for g in self.optimizer.param_groups: g['lr'] = lr self.model.load_data( NetMode.train, batch_size = cf.batch_size_max) self.profile() # training loop while True : if epoch >= cf.num_epochs : break lr = learn_rates[epoch] for g in self.optimizer.param_groups: g['lr'] = lr batch_size = min( cf.batch_size_max, batch_size + cf.batch_size_delta) tstr = datetime.datetime.now().strftime("%H:%M:%S") print( '{} : {} :: batch_size = {}, lr = {}'.format( epoch, tstr, batch_size, lr) ) self.model.load_data( NetMode.train, batch_size = batch_size) self.train( epoch) if cf.with_wandb and 0 == cf.par_rank : self.save( epoch) cur_test_loss = self.validate( epoch, cf.BERT_strategy).cpu().numpy() # self.validate( epoch, 'forecast') # save model if cur_test_loss < test_loss.min() : self.save( -2) test_loss = np.append( test_loss, [cur_test_loss]) epoch += 1 tstr = datetime.datetime.now().strftime("%H:%M:%S") print( 'Finished training at {} with test loss = {}.'.format( tstr, test_loss[-1]) ) # save final network if cf.with_wandb and 0 == cf.par_rank : self.save( -2) ################################################### def train( self, epoch): model = self.model cf = self.cf model.mode( NetMode.train) self.optimizer.zero_grad() loss_total = [[] for i in range(len(cf.losses)) ] std_dev_total = [[] for i in range(len(self.fields_prediction_idx)) ] mse_loss_total = [] grad_loss_total = [] ctr = 0 for batch_idx in range( model.len( NetMode.train)) : batch_data = self.model.next() batch_data = self.prepare_batch( batch_data) preds, _ = self.model_ddp( batch_data) loss, mse_loss, losses = self.loss( preds, batch_idx) self.optimizer.zero_grad() loss.backward() self.optimizer.step() [loss_total[idx].append( losses[key]) for idx, key in enumerate(losses)] mse_loss_total.append( mse_loss.detach().cpu() ) grad_loss_total.append( loss.detach().cpu() ) [std_dev_total[idx].append( pred[1].detach().cpu()) for idx, pred in enumerate(preds)] # logging if int((batch_idx * cf.batch_size_max) / 4) > ctr : # wandb logging if cf.with_wandb and (0 == cf.par_rank) : loss_dict = { "training loss": torch.mean( torch.tensor( mse_loss_total)), "gradient loss": torch.mean( torch.tensor( grad_loss_total)) } # log individual loss terms for 
individual fields for idx, cur_loss in enumerate(loss_total) : loss_name = self.cf.losses[idx] lt = torch.tensor(cur_loss) for i, field in enumerate(cf.fields_prediction) : idx_name = loss_name + ', ' + field[0] idx_std_name = 'stddev, ' + field[0] loss_dict[idx_name] = torch.mean( lt[:,i]).cpu().detach() loss_dict[idx_std_name] = torch.mean(torch.cat(std_dev_total[i],0)).cpu().detach() wandb.log( loss_dict ) # console output print('train epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:1.5f} : {:1.5f} :: {:1.5f}'.format( epoch, batch_idx, model.len( NetMode.train), 100. * batch_idx/model.len(NetMode.train), torch.mean( torch.tensor( grad_loss_total)), torch.mean(torch.tensor(mse_loss_total)), torch.mean( preds[0][1]) ), flush=True) # save model (use -2 as epoch to indicate latest, stored without epoch specification) # self.save( -2) # reset loss_total = [[] for i in range(len(cf.losses)) ] mse_loss_total = [] grad_loss_total = [] std_dev_total = [[] for i in range(len(self.fields_prediction_idx)) ] ctr += 1 # save gradients if cf.save_grads and cf.with_wandb and (0 == cf.par_rank) : dir_name = './grads/id{}'.format( cf.wandb_id) if not os.path.exists(dir_name): os.makedirs(dir_name) rmsprop_ws = [] for k in range( len(self.optimizer.state_dict()['state']) ) : rmsprop_ws.append(self.optimizer.state_dict()['state'][k]['exp_avg_sq'].mean().unsqueeze(0)) rmsprop_ws = torch.cat( rmsprop_ws) fname = '{}/{}_epoch{}_rmsprop.npy'.format( dir_name, cf.wandb_id, epoch) np.save( fname, rmsprop_ws.cpu().detach().numpy() ) idx = 0 for name, param in self.model.named_parameters(): if param.requires_grad : fname = '{}/{}_epoch{}_{:05d}_{}_grad.npy'.format( dir_name, cf.wandb_id, epoch, idx,name) np.save( fname, param.grad.cpu().detach().numpy() ) idx += 1 # clean memory self.optimizer.zero_grad() del batch_data, loss, loss_total, mse_loss_total, grad_loss_total, std_dev_total ################################################### def profile( self): model = self.model cf = self.cf model.mode( NetMode.train) self.optimizer.zero_grad() # See https://pytorch.org/tutorials/intermediate/tensorboard_profiler_tutorial.html # for details on how to load and analyse report # https://pytorch.org/blog/trace-analysis-for-masses/ # do for all par_ranks to avoid that they run out of sync print( '---------------------------------') print( 'Profiling:') pname = './logs/profile_par_rank' + str(cf.par_rank) + '_' + cf.wandb_id + '/profile' with torch.profiler.profile( activities=[torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.CUDA], schedule=torch.profiler.schedule(wait=1, warmup=1, active=3, repeat=2), on_trace_ready=torch.profiler.tensorboard_trace_handler(pname), profile_memory=True, record_shapes=True, with_stack=True) as prof: for batch_idx in range( 2 * (1+1+3) ) : batch_data = self.model.next() batch_data = self.prepare_batch( batch_data) preds, _ = self.model_ddp( batch_data) loss, mse_loss, losses = self.loss( preds, batch_idx) self.optimizer.zero_grad() # loss.backward() # self.optimizer.step() prof.step() print( 'Profiling finished.') print( '---------------------------------') ################################################### def validate( self, epoch, BERT_test_strategy = 'BERT'): cf = self.cf BERT_strategy_train = cf.BERT_strategy cf.BERT_strategy = BERT_test_strategy self.model.mode( NetMode.test) total_loss = 0. 
total_losses = torch.zeros( len(self.fields_prediction_idx) ) test_len = 0 self.mode_test = True # run in training mode offset = 0 if -1 == epoch and 0 == cf.par_rank : if 1 == cf.num_accs_per_task : # bug in torchinfo; fixed in v1.8.0 offset += 1 print( 'Network size:') batch_data = self.model.next() batch_data = self.prepare_batch( batch_data) torchinfo.summary( self.model, input_data=[batch_data]) # run test set evaluation with torch.no_grad() : for it in range( self.model.len( NetMode.test) - offset) : batch_data = self.model.next() if cf.par_rank < cf.log_test_num_ranks : # keep on cpu since it will otherwise clog up GPU memory (sources, token_infos, targets, tmis, tmis_list) = batch_data[0] # targets if len(batch_data[1]) > 0 : if type(batch_data[1][0][0]) is list : targets = [batch_data[1][i][0][0] for i in range( len(batch_data[1]))] else : targets = batch_data[1][0] # store on cpu log_sources = ( [source.detach().clone().cpu() for source in sources ], [ti.detach().clone().cpu() for ti in token_infos], [target.detach().clone().cpu() for target in targets ], tmis, tmis_list ) batch_data = self.prepare_batch( batch_data) preds, atts = self.model( batch_data) loss = torch.tensor( 0.) ifield = 0 for pred, idx in zip( preds, self.fields_prediction_idx) : target = self.targets[idx] # hook for custom test loss self.test_loss( pred, target) # base line loss cur_loss = self.MSELoss( pred[0], target = target ).cpu().item() loss += cur_loss total_losses[ifield] += cur_loss ifield += 1 total_loss += loss test_len += 1 # store detailed results on current test set for book keeping if cf.par_rank < cf.log_test_num_ranks : log_preds = [[p.detach().clone().cpu() for p in pred] for pred in preds] self.log_validate( epoch, it, log_sources, log_preds) if cf.attention: self.log_attention( epoch, it, [atts, [ti.detach().clone().cpu() for ti in token_infos]]) # average over all nodes total_loss /= test_len * len(self.cf.fields_prediction) total_losses /= test_len if cf.with_ddp : total_loss_cuda = total_loss.cuda() total_losses_cuda = total_losses.cuda() dist.all_reduce( total_loss_cuda, op=torch.distributed.ReduceOp.AVG ) dist.all_reduce( total_losses_cuda, op=torch.distributed.ReduceOp.AVG ) total_loss = total_loss_cuda.cpu() total_losses = total_losses_cuda.cpu() if 0 == cf.par_rank : print( 'validation loss for strategy={} at epoch {} : {}'.format( BERT_test_strategy, epoch, total_loss), flush=True) if cf.with_wandb and (0 == cf.par_rank) : loss_dict = {"val. loss {}".format(BERT_test_strategy) : total_loss} total_losses = total_losses.cpu().detach() for i, field in enumerate(cf.fields_prediction) : idx_name = 'val., {}, '.format(BERT_test_strategy) + field[0] loss_dict[idx_name] = total_losses[i] print( 'validation loss for {} : {}'.format( field[0], total_losses[i] )) wandb.log( loss_dict) batch_data = [] torch.cuda.empty_cache() cf.BERT_strategy = BERT_strategy_train self.mode_test = False return total_loss ################################################### def evaluate( self, data_idx = 0, log = True): cf = self.cf self.model.mode( NetMode.test) log_sources = [] test_len = 0 # evaluate loss = torch.tensor( 0.) 
with torch.no_grad() : for it in range( self.model.len( NetMode.test)) : batch_data = self.model.next() if cf.par_rank < cf.log_test_num_ranks : # keep on cpu since it will otherwise clog up GPU memory (sources, token_infos, targets, tmis, tmis_list) = batch_data[0] # targets if len(batch_data[1]) > 0 : targets = [] for target_field in batch_data[1] : targets.append(torch.cat([target_vl[0].unsqueeze(1) for target_vl in target_field],1)) # store on cpu log_sources = ( [source.detach().clone().cpu() for source in sources ], [ti.detach().clone().cpu() for ti in token_infos], [target.detach().clone().cpu() for target in targets ], tmis, tmis_list ) batch_data = self.prepare_batch( batch_data) preds, atts = self.model( batch_data) ifield = 0 for pred, idx in zip( preds, self.fields_prediction_idx) : target = self.targets[idx] cur_loss = self.MSELoss( pred[0], target = target ).cpu() loss += cur_loss ifield += 1 test_len += 1 # logging if cf.par_rank < cf.log_test_num_ranks : self.log_validate( data_idx, it, log_sources, preds) if cf.attention: self.log_attention( data_idx , it, [atts, [ti.detach().clone().cpu() for ti in token_infos]]) # average over all nodes loss /= test_len * len(self.cf.fields_prediction) if cf.with_ddp : loss_cuda = loss.cuda() dist.all_reduce( loss_cuda, op=torch.distributed.ReduceOp.AVG ) loss = loss_cuda.cpu() if 0 == cf.par_rank : print( 'Loss {}'.format( loss)) ################################################### def test_loss( self, pred, target) : '''Hook for custom test loss''' pass ################################################### def loss( self, preds, batch_idx = 0) : # TODO: move implementations to individual files cf = self.cf mse_loss_total = torch.tensor( 0.,) losses = dict(zip(cf.losses,[[] for loss in cf.losses ])) for pred, idx in zip( preds, self.fields_prediction_idx) : target = self.targets[idx] mse_loss = self.MSELoss( pred[0], target = target) mse_loss_total += mse_loss.cpu().detach() # MSE loss if 'mse' in self.cf.losses : losses['mse'].append( mse_loss) # MSE ensemble loss if 'mse_ensemble' in self.cf.losses : loss_en = torch.tensor( 0., device=target.device) for en in torch.transpose( pred[2], 1, 0) : loss_en += self.MSELoss( en, target = target) # losses['mse_ensemble'].append( 50. * loss_en / pred[2].shape[1]) losses['mse_ensemble'].append( loss_en / pred[2].shape[1]) # Generalized cross entropy loss for continuous distributions if 'stats' in self.cf.losses :
stats_loss = Gaussian( target, pred[0], pred[1])
6
2023-10-09 19:42:46+00:00
16k
NKI-AI/ahcore
ahcore/callbacks/wsi_metric_callback.py
[ { "identifier": "WriteH5Callback", "path": "ahcore/callbacks/h5_callback.py", "snippet": "class WriteH5Callback(Callback):\n def __init__(\n self,\n max_queue_size: int,\n max_concurrent_writers: int,\n dump_dir: Path,\n normalization_type: str = str(NormalizationTy...
import itertools import json import multiprocessing import time import pytorch_lightning as pl import torch from collections import namedtuple from multiprocessing.pool import Pool from pathlib import Path from typing import Any, Generator, Optional, cast from pytorch_lightning import Callback from ahcore.callbacks import WriteH5Callback from ahcore.lit_module import AhCoreLightningModule from ahcore.metrics import WSIMetricFactory from ahcore.readers import H5FileImageReader, StitchingMode from ahcore.utils.callbacks import _get_h5_output_filename, _ValidationDataset from ahcore.utils.data import DataDescription from ahcore.utils.io import get_logger from ahcore.utils.manifest import DataManager, ImageMetadata, fetch_image_metadata, get_mask_and_annotations_from_record
13,259
# Check for completed tasks for result in list(results_to_filename.keys()): if result.ready(): filename = results_to_filename.pop(result) try: metric = result.get() except Exception as exc: self._logger.error("%r generated an exception: %s" % (filename, exc)) else: metrics.append(metric) self._logger.debug("Metric for %r is %s" % (filename, metric)) completed_tasks += 1 # Schedule a new task if there are more filenames left in the generator next_metadata = next(self._validate_metadata, None) while next_metadata: task_data = prepare_task_data( next_metadata.filename, # <-- Changed from image_metadata.filename self._dump_dir, pl_module, self._data_description, self._data_manager, ) # Schedule task schedule_task( task_data, pool, results_to_filename, self._class_names, self._data_description, self._wsi_metrics, self._save_per_image, ) next_metadata = next(self._validate_metadata, None) return metrics def on_validation_epoch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None: if not self._dump_dir: raise ValueError("Dump directory is not set.") if not self._wsi_metrics: raise ValueError("WSI metrics are not set.") assert self._model_name # This should be set in the setup() # Ensure that all h5 files have been written self._logger.debug("Computing metrics for %s predictions", len(self._filenames)) computed_metrics = self.compute_metrics(trainer, pl_module) metrics = self._wsi_metrics.get_average_score(computed_metrics) results_json_fn = ( self._dump_dir / "outputs" / self._model_name / f"step_{pl_module.global_step}" / "results.json" ) with open(results_json_fn, "w", encoding="utf-8") as json_file: json.dump(self._dump_list, json_file, indent=2) self._wsi_metrics.reset() # Reset stuff self._dump_list = [] self._filenames = {} self._logger.debug("Metrics: %s", metrics) # TODO: Maybe put this elsewhere? metrics = {f"validate/{k}": v for k, v in metrics.items()} pl_module.log_dict(metrics, prog_bar=True) TaskData = namedtuple("TaskData", ["filename", "h5_filename", "metadata", "mask", "annotations"]) def prepare_task_data( filename: Path, dump_dir: Path, pl_module: pl.LightningModule, data_description: DataDescription, data_manager: DataManager, ) -> TaskData: h5_filename = _get_h5_output_filename( dump_dir=dump_dir, input_path=data_description.data_dir / filename, model_name=str(pl_module.name), step=pl_module.global_step, ) image = data_manager.get_image_by_filename(str(filename)) metadata = fetch_image_metadata(image) mask, annotations = get_mask_and_annotations_from_record(data_description.annotations_dir, image) return TaskData(filename, h5_filename, metadata, mask, annotations) def schedule_task( task_data: TaskData, pool: Pool, results_dict: dict[Any, str], # Any because it will be a multiprocessing.pool.AsyncResult class_names: dict[int, str], data_description: DataDescription, wsi_metrics: WSIMetricFactory, save_per_image: bool, ) -> None: result = pool.apply_async( compute_metrics_for_case, args=(task_data, class_names, data_description, wsi_metrics, save_per_image), ) results_dict[result] = task_data.filename def compute_metrics_for_case( task_data: TaskData, class_names: dict[int, str], data_description: DataDescription, wsi_metrics: WSIMetricFactory, save_per_image: bool, ) -> list[dict[str, Any]]: # Extract the data from the namedtuple filename, h5_filename, metadata, mask, annotations = task_data dump_list = [] logger.info("Computing metrics for %s", filename)
from __future__ import annotations logger = get_logger(__name__) class ComputeWsiMetricsCallback(Callback): def __init__(self, max_processes: int = 10, save_per_image: bool = True) -> None: """ Callback to compute metrics on whole-slide images. This callback is used to compute metrics on whole-slide images in separate processes. Parameters ---------- max_processes : int The maximum number of concurrent processes. """ self._data_description: Optional[DataDescription] = None self._reader = H5FileImageReader self._max_processes: int = max_processes self._dump_dir: Optional[Path] = None self._save_per_image = save_per_image self._filenames: dict[Path, Path] = {} self._wsi_metrics: WSIMetricFactory | None = None self._class_names: dict[int, str] = {} self._data_manager = None self._validate_filenames_gen = None self._model_name: str | None = None self._validate_metadata_gen: Generator[ImageMetadata, None, None] | None = None self._dump_list: list[dict[str, str]] = [] self._logger = get_logger(type(self).__name__) def setup( self, trainer: pl.Trainer, pl_module: pl.LightningModule, stage: Optional[str] = None, ) -> None: if not isinstance(pl_module, AhCoreLightningModule): # TODO: Make a AhCoreCallback with these features raise ValueError("AhCoreLightningModule required for WriteTiffCallback.") self._model_name = pl_module.name _callback: Optional[WriteH5Callback] = None for idx, callback in enumerate(trainer.callbacks): # type: ignore if isinstance(callback, WriteH5Callback): _callback = cast(WriteH5Callback, trainer.callbacks[idx]) # type: ignore break if _callback is None: raise ValueError( "WriteH5Callback is not in the trainer's callbacks. " "This is required before WSI metrics can be computed using this Callback" ) self._dump_dir = _callback.dump_dir if pl_module.wsi_metrics is None: raise ValueError("WSI metrics are not set.") self._wsi_metrics = pl_module.wsi_metrics self._data_description = trainer.datamodule.data_description # type: ignore # For mypy assert self._data_description index_map = self._data_description.index_map assert index_map if not self._data_description: raise ValueError("Data description is not set.") self._class_names = dict([(v, k) for k, v in index_map.items()]) self._class_names[0] = "background" # Here we can query the database for the validation images self._data_manager: DataManager = trainer.datamodule.data_manager # type: ignore def _create_validate_image_metadata_gen( self, ) -> Generator[ImageMetadata, None, None]: assert self._data_description assert self._data_manager gen = self._data_manager.get_image_metadata_by_split( manifest_name=self._data_description.manifest_name, split_version=self._data_description.split_version, split_category="validate", ) for image_metadata in gen: yield image_metadata @property def _validate_metadata(self) -> Generator[ImageMetadata, None, None] | None: return self._validate_metadata_gen def on_validation_epoch_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None: self._validate_metadata_gen = self._create_validate_image_metadata_gen() def on_validation_batch_end( self, trainer: pl.Trainer, pl_module: pl.LightningModule, outputs: Any, batch: Any, batch_idx: int, dataloader_idx: int = 0, ) -> None: if not self._dump_dir: raise ValueError("Dump directory is not set.") filenames = batch["path"] # Filenames are constant across the batch. if len(set(filenames)) != 1: raise ValueError( "All paths in a batch must be the same. " "Either use batch_size=1 or ahcore.data.samplers.WsiBatchSampler." 
) def compute_metrics( self, trainer: pl.Trainer, pl_module: pl.LightningModule ) -> list[list[dict[str, dict[str, float]]]]: assert self._dump_dir assert self._data_description assert self._validate_metadata assert self._data_manager metrics = [] with multiprocessing.Pool(processes=self._max_processes) as pool: results_to_filename: dict[list[dict[str, Any]], str] = {} completed_tasks = 0 # Fill up the initial task pool for image_metadata in itertools.islice(self._validate_metadata, self._max_processes): logger.info("Metadata: %s", image_metadata) # Assemble the task data # filename", "h5_filename", "metadata", "mask", "annotations" task_data = prepare_task_data( image_metadata.filename, self._dump_dir, pl_module, self._data_description, self._data_manager, ) # Schedule task schedule_task( task_data, pool, results_to_filename, self._class_names, self._data_description, self._wsi_metrics, self._save_per_image, ) while results_to_filename: time.sleep(0.1) # Reduce excessive polling # Check for completed tasks for result in list(results_to_filename.keys()): if result.ready(): filename = results_to_filename.pop(result) try: metric = result.get() except Exception as exc: self._logger.error("%r generated an exception: %s" % (filename, exc)) else: metrics.append(metric) self._logger.debug("Metric for %r is %s" % (filename, metric)) completed_tasks += 1 # Schedule a new task if there are more filenames left in the generator next_metadata = next(self._validate_metadata, None) while next_metadata: task_data = prepare_task_data( next_metadata.filename, # <-- Changed from image_metadata.filename self._dump_dir, pl_module, self._data_description, self._data_manager, ) # Schedule task schedule_task( task_data, pool, results_to_filename, self._class_names, self._data_description, self._wsi_metrics, self._save_per_image, ) next_metadata = next(self._validate_metadata, None) return metrics def on_validation_epoch_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None: if not self._dump_dir: raise ValueError("Dump directory is not set.") if not self._wsi_metrics: raise ValueError("WSI metrics are not set.") assert self._model_name # This should be set in the setup() # Ensure that all h5 files have been written self._logger.debug("Computing metrics for %s predictions", len(self._filenames)) computed_metrics = self.compute_metrics(trainer, pl_module) metrics = self._wsi_metrics.get_average_score(computed_metrics) results_json_fn = ( self._dump_dir / "outputs" / self._model_name / f"step_{pl_module.global_step}" / "results.json" ) with open(results_json_fn, "w", encoding="utf-8") as json_file: json.dump(self._dump_list, json_file, indent=2) self._wsi_metrics.reset() # Reset stuff self._dump_list = [] self._filenames = {} self._logger.debug("Metrics: %s", metrics) # TODO: Maybe put this elsewhere? 
metrics = {f"validate/{k}": v for k, v in metrics.items()} pl_module.log_dict(metrics, prog_bar=True) TaskData = namedtuple("TaskData", ["filename", "h5_filename", "metadata", "mask", "annotations"]) def prepare_task_data( filename: Path, dump_dir: Path, pl_module: pl.LightningModule, data_description: DataDescription, data_manager: DataManager, ) -> TaskData: h5_filename = _get_h5_output_filename( dump_dir=dump_dir, input_path=data_description.data_dir / filename, model_name=str(pl_module.name), step=pl_module.global_step, ) image = data_manager.get_image_by_filename(str(filename)) metadata = fetch_image_metadata(image) mask, annotations = get_mask_and_annotations_from_record(data_description.annotations_dir, image) return TaskData(filename, h5_filename, metadata, mask, annotations) def schedule_task( task_data: TaskData, pool: Pool, results_dict: dict[Any, str], # Any because it will be a multiprocessing.pool.AsyncResult class_names: dict[int, str], data_description: DataDescription, wsi_metrics: WSIMetricFactory, save_per_image: bool, ) -> None: result = pool.apply_async( compute_metrics_for_case, args=(task_data, class_names, data_description, wsi_metrics, save_per_image), ) results_dict[result] = task_data.filename def compute_metrics_for_case( task_data: TaskData, class_names: dict[int, str], data_description: DataDescription, wsi_metrics: WSIMetricFactory, save_per_image: bool, ) -> list[dict[str, Any]]: # Extract the data from the namedtuple filename, h5_filename, metadata, mask, annotations = task_data dump_list = [] logger.info("Computing metrics for %s", filename)
with H5FileImageReader(h5_filename, stitching_mode=StitchingMode.CROP) as h5reader:
4
2023-10-14 18:04:12+00:00
16k
fury-05/BookRecomendApp
.pythonlibs/lib/python3.10/site-packages/sklearn/cluster/_dbscan.py
[ { "identifier": "BaseEstimator", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/base.py", "snippet": "class BaseEstimator(_MetadataRequester):\n \"\"\"Base class for all estimators in scikit-learn.\n\n Notes\n -----\n All estimators should specify all the parameters that can be se...
import warnings import numpy as np from numbers import Integral, Real from scipy import sparse from ..base import BaseEstimator, ClusterMixin, _fit_context from ..metrics.pairwise import _VALID_METRICS from ..neighbors import NearestNeighbors from ..utils._param_validation import Interval, StrOptions from ..utils.validation import _check_sample_weight from ._dbscan_inner import dbscan_inner
12,551
metric : str, or callable, default='euclidean' The metric to use when calculating distance between instances in a feature array. If metric is a string or callable, it must be one of the options allowed by :func:`sklearn.metrics.pairwise_distances` for its metric parameter. If metric is "precomputed", X is assumed to be a distance matrix and must be square. X may be a :term:`sparse graph`, in which case only "nonzero" elements may be considered neighbors for DBSCAN. .. versionadded:: 0.17 metric *precomputed* to accept precomputed sparse matrix. metric_params : dict, default=None Additional keyword arguments for the metric function. .. versionadded:: 0.19 algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto' The algorithm to be used by the NearestNeighbors module to compute pointwise distances and find nearest neighbors. See NearestNeighbors module documentation for details. leaf_size : int, default=30 Leaf size passed to BallTree or cKDTree. This can affect the speed of the construction and query, as well as the memory required to store the tree. The optimal value depends on the nature of the problem. p : float, default=None The power of the Minkowski metric to be used to calculate distance between points. If None, then ``p=2`` (equivalent to the Euclidean distance). n_jobs : int, default=None The number of parallel jobs to run. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. Attributes ---------- core_sample_indices_ : ndarray of shape (n_core_samples,) Indices of core samples. components_ : ndarray of shape (n_core_samples, n_features) Copy of each core sample found by training. labels_ : ndarray of shape (n_samples) Cluster labels for each point in the dataset given to fit(). Noisy samples are given the label -1. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- OPTICS : A similar clustering at multiple values of eps. Our implementation is optimized for memory usage. Notes ----- For an example, see :ref:`examples/cluster/plot_dbscan.py <sphx_glr_auto_examples_cluster_plot_dbscan.py>`. This implementation bulk-computes all neighborhood queries, which increases the memory complexity to O(n.d) where d is the average number of neighbors, while original DBSCAN had memory complexity O(n). It may attract a higher memory complexity when querying these nearest neighborhoods, depending on the ``algorithm``. One way to avoid the query complexity is to pre-compute sparse neighborhoods in chunks using :func:`NearestNeighbors.radius_neighbors_graph <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>` with ``mode='distance'``, then using ``metric='precomputed'`` here. Another way to reduce memory and computation time is to remove (near-)duplicate points and use ``sample_weight`` instead. :class:`~sklearn.cluster.OPTICS` provides a similar clustering with lower memory usage. References ---------- Ester, M., H. P. Kriegel, J. Sander, and X. Xu, `"A Density-Based Algorithm for Discovering Clusters in Large Spatial Databases with Noise" <https://www.dbs.ifi.lmu.de/Publikationen/Papers/KDD-96.final.frame.pdf>`_. In: Proceedings of the 2nd International Conference on Knowledge Discovery and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 
1996 Schubert, E., Sander, J., Ester, M., Kriegel, H. P., & Xu, X. (2017). :doi:`"DBSCAN revisited, revisited: why and how you should (still) use DBSCAN." <10.1145/3068335>` ACM Transactions on Database Systems (TODS), 42(3), 19. Examples -------- >>> from sklearn.cluster import DBSCAN >>> import numpy as np >>> X = np.array([[1, 2], [2, 2], [2, 3], ... [8, 7], [8, 8], [25, 80]]) >>> clustering = DBSCAN(eps=3, min_samples=2).fit(X) >>> clustering.labels_ array([ 0, 0, 0, 1, 1, -1]) >>> clustering DBSCAN(eps=3, min_samples=2) """ _parameter_constraints: dict = { "eps": [Interval(Real, 0.0, None, closed="neither")], "min_samples": [Interval(Integral, 1, None, closed="left")], "metric": [
""" DBSCAN: Density-Based Spatial Clustering of Applications with Noise """ # Author: Robert Layton <robertlayton@gmail.com> # Joel Nothman <joel.nothman@gmail.com> # Lars Buitinck # # License: BSD 3 clause def dbscan( X, eps=0.5, *, min_samples=5, metric="minkowski", metric_params=None, algorithm="auto", leaf_size=30, p=2, sample_weight=None, n_jobs=None, ): """Perform DBSCAN clustering from vector array or distance matrix. Read more in the :ref:`User Guide <dbscan>`. Parameters ---------- X : {array-like, sparse (CSR) matrix} of shape (n_samples, n_features) or \ (n_samples, n_samples) A feature array, or array of distances between samples if ``metric='precomputed'``. eps : float, default=0.5 The maximum distance between two samples for one to be considered as in the neighborhood of the other. This is not a maximum bound on the distances of points within a cluster. This is the most important DBSCAN parameter to choose appropriately for your data set and distance function. min_samples : int, default=5 The number of samples (or total weight) in a neighborhood for a point to be considered as a core point. This includes the point itself. metric : str or callable, default='minkowski' The metric to use when calculating distance between instances in a feature array. If metric is a string or callable, it must be one of the options allowed by :func:`sklearn.metrics.pairwise_distances` for its metric parameter. If metric is "precomputed", X is assumed to be a distance matrix and must be square during fit. X may be a :term:`sparse graph <sparse graph>`, in which case only "nonzero" elements may be considered neighbors. metric_params : dict, default=None Additional keyword arguments for the metric function. .. versionadded:: 0.19 algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto' The algorithm to be used by the NearestNeighbors module to compute pointwise distances and find nearest neighbors. See NearestNeighbors module documentation for details. leaf_size : int, default=30 Leaf size passed to BallTree or cKDTree. This can affect the speed of the construction and query, as well as the memory required to store the tree. The optimal value depends on the nature of the problem. p : float, default=2 The power of the Minkowski metric to be used to calculate distance between points. sample_weight : array-like of shape (n_samples,), default=None Weight of each sample, such that a sample with a weight of at least ``min_samples`` is by itself a core sample; a sample with negative weight may inhibit its eps-neighbor from being core. Note that weights are absolute, and default to 1. n_jobs : int, default=None The number of parallel jobs to run for neighbors search. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. If precomputed distance are used, parallel execution is not available and thus n_jobs will have no effect. Returns ------- core_samples : ndarray of shape (n_core_samples,) Indices of core samples. labels : ndarray of shape (n_samples,) Cluster labels for each point. Noisy samples are given the label -1. See Also -------- DBSCAN : An estimator interface for this clustering algorithm. OPTICS : A similar estimator interface clustering at multiple values of eps. Our implementation is optimized for memory usage. Notes ----- For an example, see :ref:`examples/cluster/plot_dbscan.py <sphx_glr_auto_examples_cluster_plot_dbscan.py>`. 
This implementation bulk-computes all neighborhood queries, which increases the memory complexity to O(n.d) where d is the average number of neighbors, while original DBSCAN had memory complexity O(n). It may attract a higher memory complexity when querying these nearest neighborhoods, depending on the ``algorithm``. One way to avoid the query complexity is to pre-compute sparse neighborhoods in chunks using :func:`NearestNeighbors.radius_neighbors_graph <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>` with ``mode='distance'``, then using ``metric='precomputed'`` here. Another way to reduce memory and computation time is to remove (near-)duplicate points and use ``sample_weight`` instead. :class:`~sklearn.cluster.OPTICS` provides a similar clustering with lower memory usage. References ---------- Ester, M., H. P. Kriegel, J. Sander, and X. Xu, `"A Density-Based Algorithm for Discovering Clusters in Large Spatial Databases with Noise" <https://www.dbs.ifi.lmu.de/Publikationen/Papers/KDD-96.final.frame.pdf>`_. In: Proceedings of the 2nd International Conference on Knowledge Discovery and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996 Schubert, E., Sander, J., Ester, M., Kriegel, H. P., & Xu, X. (2017). :doi:`"DBSCAN revisited, revisited: why and how you should (still) use DBSCAN." <10.1145/3068335>` ACM Transactions on Database Systems (TODS), 42(3), 19. """ est = DBSCAN( eps=eps, min_samples=min_samples, metric=metric, metric_params=metric_params, algorithm=algorithm, leaf_size=leaf_size, p=p, n_jobs=n_jobs, ) est.fit(X, sample_weight=sample_weight) return est.core_sample_indices_, est.labels_ class DBSCAN(ClusterMixin, BaseEstimator): """Perform DBSCAN clustering from vector array or distance matrix. DBSCAN - Density-Based Spatial Clustering of Applications with Noise. Finds core samples of high density and expands clusters from them. Good for data which contains clusters of similar density. The worst case memory complexity of DBSCAN is :math:`O({n}^2)`, which can occur when the `eps` param is large and `min_samples` is low. Read more in the :ref:`User Guide <dbscan>`. Parameters ---------- eps : float, default=0.5 The maximum distance between two samples for one to be considered as in the neighborhood of the other. This is not a maximum bound on the distances of points within a cluster. This is the most important DBSCAN parameter to choose appropriately for your data set and distance function. min_samples : int, default=5 The number of samples (or total weight) in a neighborhood for a point to be considered as a core point. This includes the point itself. If `min_samples` is set to a higher value, DBSCAN will find denser clusters, whereas if it is set to a lower value, the found clusters will be more sparse. metric : str, or callable, default='euclidean' The metric to use when calculating distance between instances in a feature array. If metric is a string or callable, it must be one of the options allowed by :func:`sklearn.metrics.pairwise_distances` for its metric parameter. If metric is "precomputed", X is assumed to be a distance matrix and must be square. X may be a :term:`sparse graph`, in which case only "nonzero" elements may be considered neighbors for DBSCAN. .. versionadded:: 0.17 metric *precomputed* to accept precomputed sparse matrix. metric_params : dict, default=None Additional keyword arguments for the metric function. .. 
versionadded:: 0.19 algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto' The algorithm to be used by the NearestNeighbors module to compute pointwise distances and find nearest neighbors. See NearestNeighbors module documentation for details. leaf_size : int, default=30 Leaf size passed to BallTree or cKDTree. This can affect the speed of the construction and query, as well as the memory required to store the tree. The optimal value depends on the nature of the problem. p : float, default=None The power of the Minkowski metric to be used to calculate distance between points. If None, then ``p=2`` (equivalent to the Euclidean distance). n_jobs : int, default=None The number of parallel jobs to run. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. Attributes ---------- core_sample_indices_ : ndarray of shape (n_core_samples,) Indices of core samples. components_ : ndarray of shape (n_core_samples, n_features) Copy of each core sample found by training. labels_ : ndarray of shape (n_samples) Cluster labels for each point in the dataset given to fit(). Noisy samples are given the label -1. n_features_in_ : int Number of features seen during :term:`fit`. .. versionadded:: 0.24 feature_names_in_ : ndarray of shape (`n_features_in_`,) Names of features seen during :term:`fit`. Defined only when `X` has feature names that are all strings. .. versionadded:: 1.0 See Also -------- OPTICS : A similar clustering at multiple values of eps. Our implementation is optimized for memory usage. Notes ----- For an example, see :ref:`examples/cluster/plot_dbscan.py <sphx_glr_auto_examples_cluster_plot_dbscan.py>`. This implementation bulk-computes all neighborhood queries, which increases the memory complexity to O(n.d) where d is the average number of neighbors, while original DBSCAN had memory complexity O(n). It may attract a higher memory complexity when querying these nearest neighborhoods, depending on the ``algorithm``. One way to avoid the query complexity is to pre-compute sparse neighborhoods in chunks using :func:`NearestNeighbors.radius_neighbors_graph <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>` with ``mode='distance'``, then using ``metric='precomputed'`` here. Another way to reduce memory and computation time is to remove (near-)duplicate points and use ``sample_weight`` instead. :class:`~sklearn.cluster.OPTICS` provides a similar clustering with lower memory usage. References ---------- Ester, M., H. P. Kriegel, J. Sander, and X. Xu, `"A Density-Based Algorithm for Discovering Clusters in Large Spatial Databases with Noise" <https://www.dbs.ifi.lmu.de/Publikationen/Papers/KDD-96.final.frame.pdf>`_. In: Proceedings of the 2nd International Conference on Knowledge Discovery and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996 Schubert, E., Sander, J., Ester, M., Kriegel, H. P., & Xu, X. (2017). :doi:`"DBSCAN revisited, revisited: why and how you should (still) use DBSCAN." <10.1145/3068335>` ACM Transactions on Database Systems (TODS), 42(3), 19. Examples -------- >>> from sklearn.cluster import DBSCAN >>> import numpy as np >>> X = np.array([[1, 2], [2, 2], [2, 3], ... 
[8, 7], [8, 8], [25, 80]]) >>> clustering = DBSCAN(eps=3, min_samples=2).fit(X) >>> clustering.labels_ array([ 0, 0, 0, 1, 1, -1]) >>> clustering DBSCAN(eps=3, min_samples=2) """ _parameter_constraints: dict = { "eps": [Interval(Real, 0.0, None, closed="neither")], "min_samples": [Interval(Integral, 1, None, closed="left")], "metric": [
StrOptions(set(_VALID_METRICS) | {"precomputed"}),
3
2023-10-07 13:19:48+00:00
16k
hellloxiaotian/KDNet
test_ccpd.py
[ { "identifier": "attempt_load", "path": "models/experimental.py", "snippet": "def attempt_load(weights, map_location=None):\n # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a\n model = Ensemble()\n # print('weights', weights) # /runs/train/yolov7_distill...
import argparse import json import os import numpy as np import torch import yaml from pathlib import Path from threading import Thread from tqdm import tqdm from models.experimental import attempt_load from utils.datasets import create_dataloader from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, check_requirements, \ box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, increment_path, colorstr from utils.metrics import ap_per_class, ConfusionMatrix from utils.plots import plot_images, output_to_target, plot_study_txt from utils.torch_utils import select_device, time_synchronized, TracedModel from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval
10,846
augment=False, verbose=False, model=None, dataloader=None, save_dir=Path(''), # for saving images save_txt=False, # for auto-labelling save_hybrid=False, # for hybrid auto-labelling save_conf=False, # save auto-label confidences plots=True, wandb_logger=None, compute_loss=None, half_precision=True, trace=False, is_coco=False, v5_metric=False): # Initialize/load model and set device training = model is not None if training: # called by train.py device = next(model.parameters()).device # get model device else: # called directly set_logging() device = select_device(opt.device, batch_size=batch_size) # Directories save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model model = attempt_load(weights, map_location=device) # load FP32 model gs = max(int(model.stride.max()), 32) # grid size (max stride) imgsz = check_img_size(imgsz, s=gs) # check img_size if trace: model = TracedModel(model, device, imgsz) # Half half = device.type != 'cpu' and half_precision # half precision only supported on CUDA if half: model.half() # Configure model.eval() if isinstance(data, str): is_coco = data.endswith('coco.yaml') with open(data) as f: data = yaml.load(f, Loader=yaml.SafeLoader) check_dataset(data) # check nc = 1 if single_cls else int(data['nc']) # number of classes iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95 niou = iouv.numel() # Logging log_imgs = 0 if wandb_logger and wandb_logger.wandb: log_imgs = min(wandb_logger.log_imgs, 100) # Dataloader if not training: if device.type != 'cpu': model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once task = opt.task if opt.task in ('train', 'val', 'test') else 'val' # path to train/val/test images dataloader = create_dataloader(data[task], imgsz, batch_size, gs, opt, pad=0.5, rect=True, prefix=colorstr(f'{task}: '))[0] if v5_metric: print("Testing with YOLOv5 AP metric...") seen = 0 confusion_matrix = ConfusionMatrix(nc=nc) names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)} coco91class = coco80_to_coco91_class() s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0. 
loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class, wandb_images = [], [], [], [], [] for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)): img = img.to(device, non_blocking=True) img = img.half() if half else img.float() # uint8 to fp16/32 img /= 255.0 # 0 - 255 to 0.0 - 1.0 targets = targets.to(device) nb, _, height, width = img.shape # batch size, channels, height, width with torch.no_grad(): # Run model t = time_synchronized() out, train_out = model(img, augment=augment) # inference and training outputs t0 += time_synchronized() - t # Compute loss if compute_loss: loss += compute_loss([x.float() for x in train_out], targets)[1][:3] # box, obj, cls # Run NMS targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling t = time_synchronized() out = non_max_suppression(out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb, multi_label=True) t1 += time_synchronized() - t # Statistics per image for si, pred in enumerate(out): labels = targets[targets[:, 0] == si, 1:] nl = len(labels) tcls = labels[:, 0].tolist() if nl else [] # target class path = Path(paths[si]) seen += 1 if len(pred) == 0: if nl: stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls)) continue # Predictions predn = pred.clone() scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred # Append to text file if save_txt: gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh for *xyxy, conf, cls in predn.tolist():
def test(data, weights=None, batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, # for NMS save_json=False, single_cls=False, augment=False, verbose=False, model=None, dataloader=None, save_dir=Path(''), # for saving images save_txt=False, # for auto-labelling save_hybrid=False, # for hybrid auto-labelling save_conf=False, # save auto-label confidences plots=True, wandb_logger=None, compute_loss=None, half_precision=True, trace=False, is_coco=False, v5_metric=False): # Initialize/load model and set device training = model is not None if training: # called by train.py device = next(model.parameters()).device # get model device else: # called directly set_logging() device = select_device(opt.device, batch_size=batch_size) # Directories save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model model = attempt_load(weights, map_location=device) # load FP32 model gs = max(int(model.stride.max()), 32) # grid size (max stride) imgsz = check_img_size(imgsz, s=gs) # check img_size if trace: model = TracedModel(model, device, imgsz) # Half half = device.type != 'cpu' and half_precision # half precision only supported on CUDA if half: model.half() # Configure model.eval() if isinstance(data, str): is_coco = data.endswith('coco.yaml') with open(data) as f: data = yaml.load(f, Loader=yaml.SafeLoader) check_dataset(data) # check nc = 1 if single_cls else int(data['nc']) # number of classes iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95 niou = iouv.numel() # Logging log_imgs = 0 if wandb_logger and wandb_logger.wandb: log_imgs = min(wandb_logger.log_imgs, 100) # Dataloader if not training: if device.type != 'cpu': model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once task = opt.task if opt.task in ('train', 'val', 'test') else 'val' # path to train/val/test images dataloader = create_dataloader(data[task], imgsz, batch_size, gs, opt, pad=0.5, rect=True, prefix=colorstr(f'{task}: '))[0] if v5_metric: print("Testing with YOLOv5 AP metric...") seen = 0 confusion_matrix = ConfusionMatrix(nc=nc) names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)} coco91class = coco80_to_coco91_class() s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0. 
loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class, wandb_images = [], [], [], [], [] for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)): img = img.to(device, non_blocking=True) img = img.half() if half else img.float() # uint8 to fp16/32 img /= 255.0 # 0 - 255 to 0.0 - 1.0 targets = targets.to(device) nb, _, height, width = img.shape # batch size, channels, height, width with torch.no_grad(): # Run model t = time_synchronized() out, train_out = model(img, augment=augment) # inference and training outputs t0 += time_synchronized() - t # Compute loss if compute_loss: loss += compute_loss([x.float() for x in train_out], targets)[1][:3] # box, obj, cls # Run NMS targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling t = time_synchronized() out = non_max_suppression(out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb, multi_label=True) t1 += time_synchronized() - t # Statistics per image for si, pred in enumerate(out): labels = targets[targets[:, 0] == si, 1:] nl = len(labels) tcls = labels[:, 0].tolist() if nl else [] # target class path = Path(paths[si]) seen += 1 if len(pred) == 0: if nl: stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls)) continue # Predictions predn = pred.clone() scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred # Append to text file if save_txt: gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh for *xyxy, conf, cls in predn.tolist():
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
10
2023-10-08 13:05:58+00:00
16k
Significant-Gravitas/autostandup
bot.py
[ { "identifier": "StreaksDB", "path": "streaks/streaks_db.py", "snippet": "class StreaksDB(BaseDB):\n \"\"\"\n StreaksDB class handles all operations related to the 'streaks' table.\n Inherits from the BaseDB class.\n \"\"\"\n\n def __init__(self, host, user, password, database, port):\n ...
import os import pytz import asyncio import openai import requests from typing import List from dotenv import load_dotenv from datetime import datetime, timedelta from multiprocessing import Process from streaks.streaks_db import StreaksDB from team_members.team_member_db import TeamMemberDB from updates.updates_db import UpdatesDB from weekly_posts.weekly_posts_db import WeeklyPostsDB from streaks.streaks_manager import StreaksManager from team_members.team_member_manager import TeamMemberManager from updates.updates_manager import UpdatesManager from weekly_posts.weekly_post_manager import WeeklyPostManager from scheduler import Scheduler from team_members.team_member import TeamMember from discord.ext import commands, tasks from discord import Intents, DMChannel from flask import Flask from asyncio import Task, ensure_future, CancelledError
12,661
# Import required modules app = Flask(__name__) # Load environment variables from the .env file load_dotenv() # Retrieve bot, guild, and channel tokens from environment variables BOT_TOKEN = os.getenv('DISCORD_BOT_TOKEN') GUILD_TOKEN = int(os.getenv('DISCORD_GUILD_TOKEN')) CHANNEL_TOKEN = int(os.getenv('DISCORD_CHANNEL_TOKEN')) ADMIN_DISCORD_ID = int(os.getenv('ADMIN_DISCORD_ID')) # Retrieve database credentials from environment variables MYSQL_HOST = os.getenv('MYSQL_HOST') MYSQL_USER = os.getenv('MYSQL_USER') MYSQL_PASSWORD = os.getenv('MYSQL_PASSWORD') MYSQL_DB = os.getenv('MYSQL_DB') MYSQL_PORT = os.getenv('MYSQL_PORT') ORG_NAME = os.getenv('GITHUB_ORG_NAME') ORG_TOKEN = os.getenv('GITHUB_ORG_TOKEN') OPENAI_API_KEY = os.getenv('OPENAI_API_KEY') # Initialize bot with default intents intents = Intents.default() intents.members = True intents.message_content = True bot = commands.Bot(command_prefix='!', intents=intents) openai.api_key = OPENAI_API_KEY # TODO: Remove these globals streaks_manager = None weekly_post_manager = None team_member_manager = None updates_manager = None scheduler = None ongoing_status_requests = {} THUMBS_UP_EMOJI = "👍" PENCIL_EMOJI = "✏️" REPORT_SUBMISSION_EMOJI = '📝'
# Import required modules app = Flask(__name__) # Load environment variables from the .env file load_dotenv() # Retrieve bot, guild, and channel tokens from environment variables BOT_TOKEN = os.getenv('DISCORD_BOT_TOKEN') GUILD_TOKEN = int(os.getenv('DISCORD_GUILD_TOKEN')) CHANNEL_TOKEN = int(os.getenv('DISCORD_CHANNEL_TOKEN')) ADMIN_DISCORD_ID = int(os.getenv('ADMIN_DISCORD_ID')) # Retrieve database credentials from environment variables MYSQL_HOST = os.getenv('MYSQL_HOST') MYSQL_USER = os.getenv('MYSQL_USER') MYSQL_PASSWORD = os.getenv('MYSQL_PASSWORD') MYSQL_DB = os.getenv('MYSQL_DB') MYSQL_PORT = os.getenv('MYSQL_PORT') ORG_NAME = os.getenv('GITHUB_ORG_NAME') ORG_TOKEN = os.getenv('GITHUB_ORG_TOKEN') OPENAI_API_KEY = os.getenv('OPENAI_API_KEY') # Initialize bot with default intents intents = Intents.default() intents.members = True intents.message_content = True bot = commands.Bot(command_prefix='!', intents=intents) openai.api_key = OPENAI_API_KEY # TODO: Remove these globals streaks_manager = None weekly_post_manager = None team_member_manager = None updates_manager = None scheduler = None ongoing_status_requests = {} THUMBS_UP_EMOJI = "👍" PENCIL_EMOJI = "✏️" REPORT_SUBMISSION_EMOJI = '📝'
async def weekly_state_reset(weekly_post_manager: WeeklyPostManager, streaks_manager: StreaksManager, team_members: List[TeamMember]):
7
2023-10-12 02:01:46+00:00
16k
azuline/rose
rose/tracks_test.py
[ { "identifier": "AudioTags", "path": "rose/audiotags.py", "snippet": "class AudioTags:\n id: str | None\n release_id: str | None\n title: str | None\n year: int | None\n tracknumber: str | None\n tracktotal: int | None\n discnumber: str | None\n disctotal: int | None\n album: ...
import json import pytest from pathlib import Path from rose.audiotags import AudioTags from rose.config import Config from rose.rule_parser import MetadataAction, MetadataMatcher from rose.tracks import dump_track, dump_tracks, run_actions_on_track
12,348
def test_run_action_on_track(config: Config, source_dir: Path) -> None: action = MetadataAction.parse("tracktitle::replace:Bop") af = AudioTags.from_file(source_dir / "Test Release 2" / "01.m4a") assert af.id is not None
def test_run_action_on_track(config: Config, source_dir: Path) -> None: action = MetadataAction.parse("tracktitle::replace:Bop") af = AudioTags.from_file(source_dir / "Test Release 2" / "01.m4a") assert af.id is not None
run_actions_on_track(config, af.id, [action])
6
2023-10-09 14:42:23+00:00
16k
grainseed/monitask
sam/segment_anything/build_sam.py
[ { "identifier": "Sam", "path": "sam/segment_anything/modeling/sam.py", "snippet": "class Sam(nn.Module):\r\n mask_threshold: float = 0.0\r\n image_format: str = \"RGB\"\r\n\r\n def __init__(\r\n self,\r\n image_encoder: ImageEncoderViT,\r\n prompt_encoder: PromptEncoder,\r\...
import torch from functools import partial from .modeling import ImageEncoderViT, MaskDecoder,MaskDecoderHQ, PromptEncoder, Sam, TwoWayTransformer, TinyViT
11,297
# This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. def build_sam_vit_h(checkpoint=None,device="cpu"): return _build_sam( encoder_embed_dim=1280, encoder_depth=32, encoder_num_heads=16, encoder_global_attn_indexes=[7, 15, 23, 31], checkpoint=checkpoint, ) build_sam = build_sam_vit_h def build_sam_vit_l(checkpoint=None,device="cpu"): return _build_sam( encoder_embed_dim=1024, encoder_depth=24, encoder_num_heads=16, encoder_global_attn_indexes=[5, 11, 17, 23], checkpoint=checkpoint, ) def build_sam_vit_b(checkpoint=None,device="cpu"): return _build_sam( encoder_embed_dim=768, encoder_depth=12, encoder_num_heads=12, encoder_global_attn_indexes=[2, 5, 8, 11], checkpoint=checkpoint, ) def build_sam_vit_t(checkpoint=None,device="cpu"): prompt_embed_dim = 256 image_size = 1024 vit_patch_size = 16 image_embedding_size = image_size // vit_patch_size mobile_sam = Sam( image_encoder=TinyViT(img_size=1024, in_chans=3, num_classes=1000, embed_dims=[64, 128, 160, 320], depths=[2, 2, 6, 2], num_heads=[2, 4, 5, 10], window_sizes=[7, 7, 14, 7], mlp_ratio=4., drop_rate=0., drop_path_rate=0.0, use_checkpoint=False, mbconv_expand_ratio=4.0, local_conv_size=3, layer_lr_decay=0.8 ), prompt_encoder=PromptEncoder( embed_dim=prompt_embed_dim, image_embedding_size=(image_embedding_size, image_embedding_size), input_image_size=(image_size, image_size), mask_in_chans=16, ), mask_decoder=MaskDecoderHQ( num_multimask_outputs=3, transformer=TwoWayTransformer( depth=2, embedding_dim=prompt_embed_dim, mlp_dim=2048, num_heads=8, ), transformer_dim=prompt_embed_dim, iou_head_depth=3, iou_head_hidden_dim=256, vit_dim=160, ), pixel_mean=[123.675, 116.28, 103.53], pixel_std=[58.395, 57.12, 57.375], ) mobile_sam.eval() if checkpoint is not None: with open(checkpoint, "rb") as f: device = "cuda" if torch.cuda.is_available() else "cpu" state_dict = torch.load(f, map_location=device) info = mobile_sam.load_state_dict(state_dict, strict=False) #print(info) for n, p in mobile_sam.named_parameters(): if 'hf_token' not in n and 'hf_mlp' not in n and 'compress_vit_feat' not in n and 'embedding_encoder' not in n and 'embedding_maskfeature' not in n: p.requires_grad = False return mobile_sam def build_mobile_sam(checkpoint=None,device="cpu"): prompt_embed_dim = 256 image_size = 1024 vit_patch_size = 16 image_embedding_size = image_size // vit_patch_size mobile_sam = Sam( image_encoder=TinyViT(img_size=1024, in_chans=3, num_classes=1000, embed_dims=[64, 128, 160, 320], depths=[2, 2, 6, 2], num_heads=[2, 4, 5, 10], window_sizes=[7, 7, 14, 7], mlp_ratio=4., drop_rate=0., drop_path_rate=0.0, use_checkpoint=False, mbconv_expand_ratio=4.0, local_conv_size=3, layer_lr_decay=0.8 ), prompt_encoder=PromptEncoder( embed_dim=prompt_embed_dim, image_embedding_size=(image_embedding_size, image_embedding_size), input_image_size=(image_size, image_size), mask_in_chans=16, ),
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. def build_sam_vit_h(checkpoint=None,device="cpu"): return _build_sam( encoder_embed_dim=1280, encoder_depth=32, encoder_num_heads=16, encoder_global_attn_indexes=[7, 15, 23, 31], checkpoint=checkpoint, ) build_sam = build_sam_vit_h def build_sam_vit_l(checkpoint=None,device="cpu"): return _build_sam( encoder_embed_dim=1024, encoder_depth=24, encoder_num_heads=16, encoder_global_attn_indexes=[5, 11, 17, 23], checkpoint=checkpoint, ) def build_sam_vit_b(checkpoint=None,device="cpu"): return _build_sam( encoder_embed_dim=768, encoder_depth=12, encoder_num_heads=12, encoder_global_attn_indexes=[2, 5, 8, 11], checkpoint=checkpoint, ) def build_sam_vit_t(checkpoint=None,device="cpu"): prompt_embed_dim = 256 image_size = 1024 vit_patch_size = 16 image_embedding_size = image_size // vit_patch_size mobile_sam = Sam( image_encoder=TinyViT(img_size=1024, in_chans=3, num_classes=1000, embed_dims=[64, 128, 160, 320], depths=[2, 2, 6, 2], num_heads=[2, 4, 5, 10], window_sizes=[7, 7, 14, 7], mlp_ratio=4., drop_rate=0., drop_path_rate=0.0, use_checkpoint=False, mbconv_expand_ratio=4.0, local_conv_size=3, layer_lr_decay=0.8 ), prompt_encoder=PromptEncoder( embed_dim=prompt_embed_dim, image_embedding_size=(image_embedding_size, image_embedding_size), input_image_size=(image_size, image_size), mask_in_chans=16, ), mask_decoder=MaskDecoderHQ( num_multimask_outputs=3, transformer=TwoWayTransformer( depth=2, embedding_dim=prompt_embed_dim, mlp_dim=2048, num_heads=8, ), transformer_dim=prompt_embed_dim, iou_head_depth=3, iou_head_hidden_dim=256, vit_dim=160, ), pixel_mean=[123.675, 116.28, 103.53], pixel_std=[58.395, 57.12, 57.375], ) mobile_sam.eval() if checkpoint is not None: with open(checkpoint, "rb") as f: device = "cuda" if torch.cuda.is_available() else "cpu" state_dict = torch.load(f, map_location=device) info = mobile_sam.load_state_dict(state_dict, strict=False) #print(info) for n, p in mobile_sam.named_parameters(): if 'hf_token' not in n and 'hf_mlp' not in n and 'compress_vit_feat' not in n and 'embedding_encoder' not in n and 'embedding_maskfeature' not in n: p.requires_grad = False return mobile_sam def build_mobile_sam(checkpoint=None,device="cpu"): prompt_embed_dim = 256 image_size = 1024 vit_patch_size = 16 image_embedding_size = image_size // vit_patch_size mobile_sam = Sam( image_encoder=TinyViT(img_size=1024, in_chans=3, num_classes=1000, embed_dims=[64, 128, 160, 320], depths=[2, 2, 6, 2], num_heads=[2, 4, 5, 10], window_sizes=[7, 7, 14, 7], mlp_ratio=4., drop_rate=0., drop_path_rate=0.0, use_checkpoint=False, mbconv_expand_ratio=4.0, local_conv_size=3, layer_lr_decay=0.8 ), prompt_encoder=PromptEncoder( embed_dim=prompt_embed_dim, image_embedding_size=(image_embedding_size, image_embedding_size), input_image_size=(image_size, image_size), mask_in_chans=16, ),
mask_decoder=MaskDecoder(
3
2023-10-14 13:45:54+00:00
16k
zhaoyizhou1123/mbrcsl
examples/pointmaze/run_combo_maze.py
[ { "identifier": "MLP", "path": "offlinerlkit/nets/mlp.py", "snippet": "class MLP(nn.Module):\n def __init__(\n self,\n input_dim: int,\n hidden_dims: Union[List[int], Tuple[int]],\n output_dim: Optional[int] = None,\n activation: nn.Module = nn.ReLU,\n dropou...
import argparse import random import datetime import numpy as np import torch from offlinerlkit.nets import MLP from offlinerlkit.modules import ActorProb, Critic, TanhDiagGaussian, EnsembleDynamicsModel from offlinerlkit.dynamics import EnsembleDynamics from offlinerlkit.utils.scaler import StandardScaler from offlinerlkit.utils.termination_fns import termination_fn_default from offlinerlkit.buffer import ReplayBuffer from offlinerlkit.utils.logger import Logger, make_log_dirs from offlinerlkit.policy_trainer import MBPolicyTrainer from offlinerlkit.policy import COMBOPolicy from offlinerlkit.utils.none_or_str import none_or_str from envs.pointmaze.create_maze_dataset import create_env_dataset from envs.pointmaze.utils.trajectory import get_pointmaze_dataset from envs.pointmaze.utils.maze_utils import PointMazeObsWrapper
14,130
def get_args(): parser = argparse.ArgumentParser() parser.add_argument("--algo_name", type=str, default="combo") parser.add_argument("--task", type=str, default="pointmaze") # Self-constructed environment parser.add_argument("--last_eval", action="store_true") # env config (general) parser.add_argument('--data_dir', type=str, required=True) parser.add_argument('--horizon', type=int, default=200, help="max path length for pickplace") # env config (pointmaze) parser.add_argument('--maze_config_file', type=str, default='envs/pointmaze/config/maze_default.json') parser.add_argument('--data_file', type=str, default='pointmaze.dat') parser.add_argument("--seed", type=int, default=0) parser.add_argument("--actor-lr", type=float, default=1e-4) parser.add_argument("--critic-lr", type=float, default=3e-4) parser.add_argument("--hidden-dims", type=int, nargs='*', default=[256, 256, 256]) parser.add_argument("--gamma", type=float, default=0.99) parser.add_argument("--tau", type=float, default=0.005) parser.add_argument("--alpha", type=float, default=0.2) parser.add_argument("--auto-alpha", default=True) parser.add_argument("--target-entropy", type=int, default=None) parser.add_argument("--alpha-lr", type=float, default=1e-4) parser.add_argument("--cql-weight", type=float, default=1.0) parser.add_argument("--temperature", type=float, default=1.0) parser.add_argument("--max-q-backup", type=bool, default=False) parser.add_argument("--deterministic-backup", type=bool, default=True) parser.add_argument("--with-lagrange", type=bool, default=False) parser.add_argument("--lagrange-threshold", type=float, default=10.0) parser.add_argument("--cql-alpha-lr", type=float, default=3e-4) parser.add_argument("--num-repeat-actions", type=int, default=10) parser.add_argument("--uniform-rollout", type=bool, default=False) parser.add_argument("--rho-s", type=str, default="mix", choices=["model", "mix"]) parser.add_argument("--dynamics-lr", type=float, default=1e-3) parser.add_argument("--dynamics-hidden-dims", type=int, nargs='*', default=[200, 200, 200, 200]) parser.add_argument("--dynamics-weight-decay", type=float, nargs='*', default=[2.5e-5, 5e-5, 7.5e-5, 7.5e-5, 1e-4]) parser.add_argument("--n-ensemble", type=int, default=7) parser.add_argument("--n-elites", type=int, default=5) parser.add_argument("--rollout-freq", type=int, default=1000) parser.add_argument("--rollout-batch-size", type=int, default=50000) parser.add_argument("--rollout-length", type=int, default=5) parser.add_argument("--model-retain-epochs", type=int, default=5) parser.add_argument("--real-ratio", type=float, default=0.5) parser.add_argument("--load-dynamics-path", type=none_or_str, default=None) parser.add_argument("--epoch", type=int, default=100) parser.add_argument("--step-per-epoch", type=int, default=1000) parser.add_argument("--eval_episodes", type=int, default=10) parser.add_argument("--batch-size", type=int, default=256) parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu") return parser.parse_args() def train(args=get_args()): # seed random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) torch.cuda.manual_seed_all(args.seed) torch.backends.cudnn.deterministic = True # create env and dataset if args.task == 'pointmaze': env, trajs = create_env_dataset(args) env = PointMazeObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape args.obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape args.action_dim = 
np.prod(args.action_shape) dataset, _, _ = get_pointmaze_dataset(trajs) else: raise NotImplementedError env.reset(seed=args.seed) # create policy model actor_backbone = MLP(input_dim=np.prod(args.obs_shape), hidden_dims=args.hidden_dims) critic1_backbone = MLP(input_dim=np.prod(args.obs_shape) + args.action_dim, hidden_dims=args.hidden_dims) critic2_backbone = MLP(input_dim=np.prod(args.obs_shape) + args.action_dim, hidden_dims=args.hidden_dims)
def get_args(): parser = argparse.ArgumentParser() parser.add_argument("--algo_name", type=str, default="combo") parser.add_argument("--task", type=str, default="pointmaze") # Self-constructed environment parser.add_argument("--last_eval", action="store_true") # env config (general) parser.add_argument('--data_dir', type=str, required=True) parser.add_argument('--horizon', type=int, default=200, help="max path length for pickplace") # env config (pointmaze) parser.add_argument('--maze_config_file', type=str, default='envs/pointmaze/config/maze_default.json') parser.add_argument('--data_file', type=str, default='pointmaze.dat') parser.add_argument("--seed", type=int, default=0) parser.add_argument("--actor-lr", type=float, default=1e-4) parser.add_argument("--critic-lr", type=float, default=3e-4) parser.add_argument("--hidden-dims", type=int, nargs='*', default=[256, 256, 256]) parser.add_argument("--gamma", type=float, default=0.99) parser.add_argument("--tau", type=float, default=0.005) parser.add_argument("--alpha", type=float, default=0.2) parser.add_argument("--auto-alpha", default=True) parser.add_argument("--target-entropy", type=int, default=None) parser.add_argument("--alpha-lr", type=float, default=1e-4) parser.add_argument("--cql-weight", type=float, default=1.0) parser.add_argument("--temperature", type=float, default=1.0) parser.add_argument("--max-q-backup", type=bool, default=False) parser.add_argument("--deterministic-backup", type=bool, default=True) parser.add_argument("--with-lagrange", type=bool, default=False) parser.add_argument("--lagrange-threshold", type=float, default=10.0) parser.add_argument("--cql-alpha-lr", type=float, default=3e-4) parser.add_argument("--num-repeat-actions", type=int, default=10) parser.add_argument("--uniform-rollout", type=bool, default=False) parser.add_argument("--rho-s", type=str, default="mix", choices=["model", "mix"]) parser.add_argument("--dynamics-lr", type=float, default=1e-3) parser.add_argument("--dynamics-hidden-dims", type=int, nargs='*', default=[200, 200, 200, 200]) parser.add_argument("--dynamics-weight-decay", type=float, nargs='*', default=[2.5e-5, 5e-5, 7.5e-5, 7.5e-5, 1e-4]) parser.add_argument("--n-ensemble", type=int, default=7) parser.add_argument("--n-elites", type=int, default=5) parser.add_argument("--rollout-freq", type=int, default=1000) parser.add_argument("--rollout-batch-size", type=int, default=50000) parser.add_argument("--rollout-length", type=int, default=5) parser.add_argument("--model-retain-epochs", type=int, default=5) parser.add_argument("--real-ratio", type=float, default=0.5) parser.add_argument("--load-dynamics-path", type=none_or_str, default=None) parser.add_argument("--epoch", type=int, default=100) parser.add_argument("--step-per-epoch", type=int, default=1000) parser.add_argument("--eval_episodes", type=int, default=10) parser.add_argument("--batch-size", type=int, default=256) parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu") return parser.parse_args() def train(args=get_args()): # seed random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) torch.cuda.manual_seed_all(args.seed) torch.backends.cudnn.deterministic = True # create env and dataset if args.task == 'pointmaze': env, trajs = create_env_dataset(args) env = PointMazeObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape args.obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape args.action_dim = 
np.prod(args.action_shape) dataset, _, _ = get_pointmaze_dataset(trajs) else: raise NotImplementedError env.reset(seed=args.seed) # create policy model actor_backbone = MLP(input_dim=np.prod(args.obs_shape), hidden_dims=args.hidden_dims) critic1_backbone = MLP(input_dim=np.prod(args.obs_shape) + args.action_dim, hidden_dims=args.hidden_dims) critic2_backbone = MLP(input_dim=np.prod(args.obs_shape) + args.action_dim, hidden_dims=args.hidden_dims)
dist = TanhDiagGaussian(
3
2023-10-11 08:36:06+00:00
16k
wilhelmagren/finq
finq/portfolio.py
[ { "identifier": "Asset", "path": "finq/asset.py", "snippet": "class Asset(object):\n \"\"\" \"\"\"\n\n def __init__(\n self,\n data: pd.Series,\n name: str,\n *,\n market: Optional[str] = None,\n index_name: Optional[str] = None,\n price_type: str =...
import logging import pandas as pd import numpy as np import scipy.optimize as scipyopt import matplotlib.pyplot as plt from functools import wraps from tqdm import tqdm from finq.asset import Asset from finq.datasets import Dataset from finq.exceptions import ( FinqError, InvalidCombinationOfArgumentsError, InvalidPortfolioWeightsError, ObjectiveFunctionError, PortfolioNotYetOptimizedError, ) from finq.formulas import ( period_returns, sharpe_ratio, weighted_returns, weighted_variance, ) from typing import ( Any, Callable, List, Dict, Tuple, Union, Optional, )
10,835
def period_returns(self, period: int) -> np.ndarray: """ """ return period_returns(self._data, period=period) def daily_returns_mean(self) -> float: """ """ return np.mean(period_returns(self._data, period=1), axis=1) def yearly_returns_mean(self) -> float: """ """ return np.mean(period_returns(self._data, period=self._n_trading_days), axis=1) def period_returns_mean(self, period: int) -> float: """ """ return np.mean(period_returns(self._data, period=period), axis=1) def daily_covariance(self) -> np.ndarray: """ """ return np.cov(period_returns(self._data, period=1), rowvar=True) def yearly_covariance(self) -> np.ndarray: """ """ return np.cov( period_returns(self._data, period=self._n_trading_days), rowvar=True ) def period_covariance(self, period: int) -> np.ndarray: """ """ return np.cov(period_returns(self._data, period=period), rowvar=True) def set_objective_function( self, function: Callable, *args: Tuple[Any, ...], ): """ """ self._objective_function = function self._objective_function_args = args def set_objective_constraints( self, *constraints, ): """ """ self._objective_constraints = [{"type": t, "fun": c} for (t, c) in constraints] def set_objective_bounds( self, bounds: Union[Tuple[int, ...], List[Tuple[int, ...]]], ): """ """ if isinstance(bounds, tuple): bounds = [bounds for _ in range(self._data.shape[0])] self._objective_bounds = bounds def sample_random_portfolios( self, n_samples: int, *, distribution: Union[str, Callable] = "lognormal", **kwargs: Dict[str, Any], ): """ """ if isinstance(distribution, str): distribution = self._weight_initializations.get(distribution, None) if distribution is None: raise ValueError( "You provided a non valid weight initialization distribution." ) portfolios = [] for i in (bar := tqdm(range(n_samples))): if i % 10: bar.set_description( f"Sampling random portfolio {i + 1} from " f"{distribution.__name__} distribution" ) portfolio = distribution(**kwargs) portfolios.append(portfolio / portfolio.sum()) self._random_portfolios = np.transpose(np.concatenate(portfolios, axis=1)) @check_valid_weights def variance(self) -> float: """ """ return weighted_variance( self._weights.T, self.daily_covariance(), ) @check_valid_weights def volatility(self) -> float: """ """ return np.sqrt( weighted_variance( self._weights.T, self.daily_covariance(), ), ) @check_valid_weights def expected_returns(self) -> float: """ """
""" MIT License Copyright (c) 2023 Wilhelm Ågren Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. File created: 2023-10-20 Last updated: 2023-11-10 """ log = logging.getLogger(__name__) class Portfolio(object): """ """ # For a full list of `scipy` optimization methods and references, see the link below. # https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html _supported_optimization_methods = ( "Nelder-Mead", "Powell", "CG", "BFGS", "Newton-CG", "L-BFGS-B", "TNC", "COBYLA", "SLSQP", "trust-constr", "dogleg", "trust-ncg", "trust-exact", "trust-krylov", ) _weight_initializations = { "lognormal": np.random.lognormal, "normal": np.random.normal, "uniform": np.random.uniform, } def __init__( self, data: Union[Dataset, List[Asset], np.ndarray, pd.DataFrame], *, weights: Optional[np.ndarray] = None, names: Optional[Union[Dict[str, str], List[str]]] = None, symbols: Optional[Union[Dict[str, str], List[str]]] = None, confidence_level: float = 0.95, risk_free_rate: float = 5e-3, n_trading_days: int = 252, objective_function: Optional[Callable] = None, objective_function_args: Tuple[Any, ...] = (), objective_bounds: Optional[List[Tuple[int, ...]]] = None, objective_constraints: Optional[Tuple[Dict, ...]] = None, ): """ """ if isinstance(data, Dataset): assets = data.as_assets() data = list(assets.values()) symbols = list(assets.keys()) if not isinstance(data, list): if names is None and symbols is None and not isinstance(data, pd.DataFrame): raise InvalidCombinationOfArgumentsError( "You need to provide the names and ticker symbols of each asset that you " "want to include in your portfolio if the data you provided is neither a " "`list` of `Asset` objects or a `pd.DataFrame`. You can also try " "providing only one of the arguments `names` and `symbols`, but then as " "a dictionary of the form `key=name` `value=symbol`." 
) if isinstance(data, list): symbols = [a.name for a in data] data = np.array([a.data for a in data]) if isinstance(data, pd.DataFrame): symbols = data.columns data = data.to_numpy().T if isinstance(names, dict): symbols = list(names.values()) names = list(names.keys()) if isinstance(symbols, dict): names = list(symbols.keys()) symbols = list(symbols.values()) self._data = data self._weights = weights self._names = names self._symbols = symbols self._confidence_level = confidence_level self._risk_free_rate = risk_free_rate self._n_trading_days = n_trading_days self._random_portfolios = None self._objective_function = objective_function self._objective_function_args = objective_function_args self._objective_bounds = objective_bounds self._objective_constraints = objective_constraints def weights_are_normalized(self) -> bool: """ """ return np.allclose(self._weights.sum(), 1.0, rtol=1e-6) def initialize_random_weights( self, distribution: Union[str, Callable], *args: Tuple[Any, ...], **kwargs: Dict[str, Any], ): """ """ if isinstance(distribution, str): distribution = self._weight_initializations.get(distribution, None) if distribution is None: raise ValueError( "You provided a non valid weight initialization distribution." ) weights = distribution(*args, **kwargs) self._weights = weights / weights.sum() def check_valid_weights(func) -> Callable: """ """ @wraps(func) def _check_valid_weights(self, *args, **kwargs) -> Optional[FinqError]: """ """ if self._weights is None: raise PortfolioNotYetOptimizedError( "Portfolio weights are `None`. Perhaps you have not yet optimized it? " ) if not self.weights_are_normalized(): raise InvalidPortfolioWeightsError( "Your portfolio weights are not normalized. Make sure to normalize them " "(they sum to one) before calculating any analytical quantities. 
" ) return func(self, *args, **kwargs) return _check_valid_weights def daily_returns(self) -> np.ndarray: """ """ return period_returns(self._data, period=1) def yearly_returns(self) -> np.ndarray: """ """ return period_returns(self._data, period=self._n_trading_days) def period_returns(self, period: int) -> np.ndarray: """ """ return period_returns(self._data, period=period) def daily_returns_mean(self) -> float: """ """ return np.mean(period_returns(self._data, period=1), axis=1) def yearly_returns_mean(self) -> float: """ """ return np.mean(period_returns(self._data, period=self._n_trading_days), axis=1) def period_returns_mean(self, period: int) -> float: """ """ return np.mean(period_returns(self._data, period=period), axis=1) def daily_covariance(self) -> np.ndarray: """ """ return np.cov(period_returns(self._data, period=1), rowvar=True) def yearly_covariance(self) -> np.ndarray: """ """ return np.cov( period_returns(self._data, period=self._n_trading_days), rowvar=True ) def period_covariance(self, period: int) -> np.ndarray: """ """ return np.cov(period_returns(self._data, period=period), rowvar=True) def set_objective_function( self, function: Callable, *args: Tuple[Any, ...], ): """ """ self._objective_function = function self._objective_function_args = args def set_objective_constraints( self, *constraints, ): """ """ self._objective_constraints = [{"type": t, "fun": c} for (t, c) in constraints] def set_objective_bounds( self, bounds: Union[Tuple[int, ...], List[Tuple[int, ...]]], ): """ """ if isinstance(bounds, tuple): bounds = [bounds for _ in range(self._data.shape[0])] self._objective_bounds = bounds def sample_random_portfolios( self, n_samples: int, *, distribution: Union[str, Callable] = "lognormal", **kwargs: Dict[str, Any], ): """ """ if isinstance(distribution, str): distribution = self._weight_initializations.get(distribution, None) if distribution is None: raise ValueError( "You provided a non valid weight initialization distribution." ) portfolios = [] for i in (bar := tqdm(range(n_samples))): if i % 10: bar.set_description( f"Sampling random portfolio {i + 1} from " f"{distribution.__name__} distribution" ) portfolio = distribution(**kwargs) portfolios.append(portfolio / portfolio.sum()) self._random_portfolios = np.transpose(np.concatenate(portfolios, axis=1)) @check_valid_weights def variance(self) -> float: """ """ return weighted_variance( self._weights.T, self.daily_covariance(), ) @check_valid_weights def volatility(self) -> float: """ """ return np.sqrt( weighted_variance( self._weights.T, self.daily_covariance(), ), ) @check_valid_weights def expected_returns(self) -> float: """ """
return weighted_returns(self._weights.T, self.daily_returns_mean())
9
2023-10-09 19:02:54+00:00
16k
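For reference, here is a minimal numpy sketch of the quantities the `Portfolio` record above computes: daily returns, their mean and covariance, then the weighted portfolio return w·mu and variance wᵀΣw. It assumes `period_returns` computes simple price-relative returns; the actual formula lives in `finq.formulas`, which the record does not show, and the price data here is fabricated.

```python
import numpy as np

rng = np.random.default_rng(0)
# Fake price history: 3 assets, 252 trading days.
prices = rng.lognormal(mean=0.0, sigma=0.01, size=(3, 252)).cumprod(axis=1)

returns = prices[:, 1:] / prices[:, :-1] - 1.0   # assumed simple daily returns
mu = returns.mean(axis=1)                        # cf. daily_returns_mean
sigma = np.cov(returns, rowvar=True)             # cf. daily_covariance

w = rng.lognormal(size=3)
w /= w.sum()                                     # normalized portfolio weights

expected = w @ mu                  # cf. weighted_returns(weights, returns_mean)
variance = w @ sigma @ w           # cf. weighted_variance(weights, covariance)
volatility = np.sqrt(variance)
print(f"E[r]={expected:.6f} vol={volatility:.6f}")
```

This is the same math the decorated `variance`, `volatility`, and `expected_returns` methods delegate to, which is why the class insists on normalized weights before computing any of them.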
lmb-freiburg/ldce
scripts/ldce.py
[ { "identifier": "disabled_train", "path": "sampling_helpers.py", "snippet": "def disabled_train(self, mode=True):\n \"\"\"Overwrite model.train with this function to make sure train/eval mode\n does not change anymore.\"\"\"\n return self" }, { "identifier": "get_model", "path": "sa...
import argparse import os import psutil import yaml import copy import random import matplotlib.pyplot as plt import numpy as np import pathlib import torch import hydra import wandb import torchvision import json import sys import regex as re import open_clip from contextlib import nullcontext from torch import autocast from omegaconf import OmegaConf, open_dict from hydra.utils import instantiate from omegaconf import DictConfig, OmegaConf from torchvision import transforms, datasets from torchvision.utils import save_image from sampling_helpers import disabled_train, get_model, _unmap_img, generate_samples from sampling_helpers import load_model_hf from ldm import * from ldm.models.diffusion.cc_ddim import CCMDDIMSampler from data.imagenet_classnames import name_map, openai_imagenet_classes from utils.DecisionDensenetModel import DecisionDensenetModel from utils.preprocessor import Normalizer, CropAndNormalizer, ResizeAndNormalizer, GenericPreprocessing, Crop from utils.vision_language_wrapper import VisionLanguageWrapper from utils.madry_net import MadryNet from utils.dino_linear import LinearClassifier, DINOLinear
13,341
classifier_model = VisionLanguageWrapper(model, tokenizer, prompts) # try running optimization on 224x224 pixel image # transforms_list = [preprocess.transforms[0], preprocess.transforms[1], preprocess.transforms[4]] if cfg.classifier_model.classifier_wrapper: transforms_list = [preprocess.transforms[1], preprocess.transforms[4]] # CenterCrop(224, 224), Normalize classifier_model = GenericPreprocessing(classifier_model, transforms.Compose(transforms_list)) else: raise NotImplementedError return classifier_model def get_dataset(cfg, last_data_idx: int = 0): if "ImageNet" in cfg.data._target_: out_size = 256 transform_list = [ transforms.Resize((out_size, out_size)), transforms.ToTensor() ] transform = transforms.Compose(transform_list) dataset = instantiate(cfg.data, start_sample=cfg.data.start_sample, end_sample=cfg.data.end_sample, transform=transform, restart_idx=last_data_idx) elif "CelebAHQDataset" in cfg.data._target_: dataset = instantiate( cfg.data, image_size=256, data_dir=cfg.data.data_dir, random_crop=False, random_flip=False, partition='test', query_label=cfg.data.query_label, normalize=False, shard=cfg.data.shard, num_shards=cfg.data.num_shards, restart_idx=last_data_idx ) elif "Flowers102" in cfg.data._target_: transform = transforms.Compose([ transforms.Resize((256, 256)), transforms.ToTensor(), ]) dataset = instantiate( cfg.data, shard=cfg.data.shard, num_shards=cfg.data.num_shards, transform=transform, restart_idx=last_data_idx ) elif "OxfordIIIPets" in cfg.data._target_: # try running on 224x224 img def _convert_to_rgb(image): return image.convert('RGB') out_size = 256 transform_list = [ transforms.Resize((out_size, out_size)), # transforms.CenterCrop(out_size), _convert_to_rgb, transforms.ToTensor(), ] transform = transforms.Compose(transform_list) dataset = instantiate( cfg.data, shard=cfg.data.shard, num_shards=cfg.data.num_shards, transform=transform, restart_idx=last_data_idx ) else: raise NotImplementedError return dataset @hydra.main(version_base=None, config_path="../configs/ldce", config_name="v1") def main(cfg : DictConfig) -> None: if "verbose" not in cfg: with open_dict(cfg): cfg.verbose = True if "record_intermediate_results" not in cfg: with open_dict(cfg): cfg.record_intermediate_results = True if "verbose" in cfg and not cfg.verbose: blockPrint() os.makedirs(cfg.output_dir, exist_ok=True) os.chmod(cfg.output_dir, 0o777) if "ImageNet" in cfg.data._target_: out_dir = os.path.join(cfg.output_dir, f"bucket_{cfg.data.start_sample}_{cfg.data.end_sample}") else: out_dir = os.path.join(cfg.output_dir, f"bucket_{cfg.data.shard}_{cfg.data.num_shards}") os.makedirs(out_dir, exist_ok=True) os.chmod(out_dir, 0o777) checkpoint_path = os.path.join(out_dir, "last_saved_id.pth") config = {} if "ImageNet" in cfg.data._target_: run_id = f"{cfg.data.start_sample}_{cfg.data.end_sample}" else: run_id = f"{cfg.data.shard}_{cfg.data.num_shards}" if cfg.resume: print("run ID to resume: ", run_id) else: print("starting new run", run_id) config.update(OmegaConf.to_container(cfg, resolve=True)) print("current run id: ", run_id) last_data_idx = 0 if cfg.resume: # or os.path.isfile(checkpoint_path): resume only if asked to, allow restarts print(f"resuming from {checkpoint_path}") #check if checkpoint exists if not os.path.exists(checkpoint_path): print("checkpoint does not exist! starting from 0 ...") else: checkpoint = torch.load(checkpoint_path)# torch.load(restored_file.name) last_data_idx = checkpoint["last_data_idx"] + 1 if "last_data_idx" in checkpoint else 0 print(f"resuming from batch {last_data_idx}") device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # device = torch.device("cpu") # there seems to be a CUDA/autograd instability in gradient computation print(f"using device: {device}") model = get_model(cfg_path=cfg.diffusion_model.cfg_path, ckpt_path = cfg.diffusion_model.ckpt_path).to(device).eval() classifier_model = get_classifier(cfg, device) classifier_model.to(device).eval()
torch.backends.cuda.matmul.allow_tf32 = True # torch.backends.cudnn.benchmark = True try: except: print("Install OpenClip via: pip install open_clip_torch") def set_seed(seed: int = 0): torch.manual_seed(seed) np.random.seed(seed) random.seed(seed) torch.cuda.manual_seed_all(seed) def blockPrint(): sys.stdout = open(os.devnull, 'w') def get_classifier(cfg, device): if "ImageNet" in cfg.data._target_: classifier_name = cfg.classifier_model.name if classifier_name == "robust_resnet50": classifier_model = MadryNet(cfg.classifier_model.ckpt, device) if "classifier_wrapper" in cfg.classifier_model and cfg.classifier_model.classifier_wrapper: classifier_model = Crop(classifier_model) else: classifier_model = getattr(torchvision.models, classifier_name)(pretrained=True) if "classifier_wrapper" in cfg.classifier_model and cfg.classifier_model.classifier_wrapper: classifier_model = CropAndNormalizer(classifier_model) elif "CelebAHQDataset" in cfg.data._target_: assert cfg.data.query_label in [20, 31, 39], 'Query label MUST be 20 (Gender), 31 (Smile), or 39 (Age) for CelebAHQ' ql = 0 if cfg.data.query_label in [31, 39]: ql = 1 if cfg.data.query_label == 31 else 2 classifier_model = DecisionDensenetModel(3, pretrained=False, query_label=ql) classifier_model.load_state_dict(torch.load(cfg.classifier_model.classifier_path, map_location='cpu')['model_state_dict']) if cfg.classifier_model.classifier_wrapper: classifier_model = Normalizer( classifier_model, [0.5] * 3, [0.5] * 3 ) elif "Flowers102" in cfg.data._target_: # fine-tuned Dino ViT B/8: https://arxiv.org/pdf/2104.14294.pdf dino = torch.hub.load('facebookresearch/dino:main', 'dino_vits8').to(device).eval() dim = dino.embed_dim linear_classifier = LinearClassifier(dim*cfg.classifier_model.n_last_blocks, 102) linear_classifier.load_state_dict(torch.load(cfg.classifier_model.classifier_path, map_location="cpu"), strict=True) linear_classifier = linear_classifier.eval().to(device) classifier_model = DINOLinear(dino, linear_classifier) transforms_list = [transforms.CenterCrop(224), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))] classifier_model = GenericPreprocessing(classifier_model, transforms.Compose(transforms_list)) elif "OxfordIIIPets" in cfg.data._target_: # zero-shot OpenClip: https://arxiv.org/pdf/2212.07143.pdf model, _, preprocess = open_clip.create_model_and_transforms('ViT-B-32', pretrained='laion2b_s34b_b79k') model = model.to(device).eval() tokenizer = open_clip.get_tokenizer('ViT-B-32') # prompts following https://github.com/openai/CLIP/blob/main/data/prompts.md with open("data/pets_idx_to_label.json", "r") as f: pets_idx_to_classname = json.load(f) prompts = [f"a photo of a {label}, a type of pet." for label in pets_idx_to_classname.values()] classifier_model = VisionLanguageWrapper(model, tokenizer, prompts) # try running optimization on 224x224 pixel image # transforms_list = [preprocess.transforms[0], preprocess.transforms[1], preprocess.transforms[4]] if cfg.classifier_model.classifier_wrapper: transforms_list = [preprocess.transforms[1], preprocess.transforms[4]] # CenterCrop(224, 224), Normalize classifier_model = GenericPreprocessing(classifier_model, transforms.Compose(transforms_list)) else: raise NotImplementedError return classifier_model def get_dataset(cfg, last_data_idx: int = 0): if "ImageNet" in cfg.data._target_: out_size = 256 transform_list = [ transforms.Resize((out_size, out_size)), transforms.ToTensor() ] transform = transforms.Compose(transform_list) dataset = instantiate(cfg.data, start_sample=cfg.data.start_sample, end_sample=cfg.data.end_sample, transform=transform, restart_idx=last_data_idx) elif "CelebAHQDataset" in cfg.data._target_: dataset = instantiate( cfg.data, image_size=256, data_dir=cfg.data.data_dir, random_crop=False, random_flip=False, partition='test', query_label=cfg.data.query_label, normalize=False, shard=cfg.data.shard, num_shards=cfg.data.num_shards, restart_idx=last_data_idx ) elif "Flowers102" in cfg.data._target_: transform = transforms.Compose([ transforms.Resize((256, 256)), transforms.ToTensor(), ]) dataset = instantiate( cfg.data, shard=cfg.data.shard, num_shards=cfg.data.num_shards, transform=transform, restart_idx=last_data_idx ) elif "OxfordIIIPets" in cfg.data._target_: # try running on 224x224 img def _convert_to_rgb(image): return image.convert('RGB') out_size = 256 transform_list = [ transforms.Resize((out_size, out_size)), # transforms.CenterCrop(out_size), _convert_to_rgb, transforms.ToTensor(), ] transform = transforms.Compose(transform_list) dataset = instantiate( cfg.data, shard=cfg.data.shard, num_shards=cfg.data.num_shards, transform=transform, restart_idx=last_data_idx ) else: raise NotImplementedError return dataset @hydra.main(version_base=None, config_path="../configs/ldce", config_name="v1") def main(cfg : DictConfig) -> None: if "verbose" not in cfg: with open_dict(cfg): cfg.verbose = True if "record_intermediate_results" not in cfg: with open_dict(cfg): cfg.record_intermediate_results = True if "verbose" in cfg and not cfg.verbose: blockPrint() os.makedirs(cfg.output_dir, exist_ok=True) os.chmod(cfg.output_dir, 0o777) if "ImageNet" in cfg.data._target_: out_dir = os.path.join(cfg.output_dir, f"bucket_{cfg.data.start_sample}_{cfg.data.end_sample}") else: out_dir = os.path.join(cfg.output_dir, f"bucket_{cfg.data.shard}_{cfg.data.num_shards}") os.makedirs(out_dir, exist_ok=True) os.chmod(out_dir, 0o777) checkpoint_path = os.path.join(out_dir, "last_saved_id.pth") config = {} if "ImageNet" in cfg.data._target_: run_id = f"{cfg.data.start_sample}_{cfg.data.end_sample}" else: run_id = f"{cfg.data.shard}_{cfg.data.num_shards}" if cfg.resume: print("run ID to resume: ", run_id) else: print("starting new run", run_id) config.update(OmegaConf.to_container(cfg, resolve=True)) print("current run id: ", run_id) last_data_idx = 0 if cfg.resume: # or os.path.isfile(checkpoint_path): resume only if asked to, allow restarts print(f"resuming from {checkpoint_path}") #check if checkpoint exists if not os.path.exists(checkpoint_path): print("checkpoint does not exist! starting from 0 ...") else: checkpoint = torch.load(checkpoint_path)# torch.load(restored_file.name) last_data_idx = checkpoint["last_data_idx"] + 1 if "last_data_idx" in checkpoint else 0 print(f"resuming from batch {last_data_idx}") device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # device = torch.device("cpu") # there seems to be a CUDA/autograd instability in gradient computation print(f"using device: {device}") model = get_model(cfg_path=cfg.diffusion_model.cfg_path, ckpt_path = cfg.diffusion_model.ckpt_path).to(device).eval() classifier_model = get_classifier(cfg, device) classifier_model.to(device).eval()
classifier_model.train = disabled_train
0
2023-10-10 09:40:10+00:00
16k
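The record above repeatedly wraps classifiers in preprocessing modules (`Normalizer`, `CropAndNormalizer`, `GenericPreprocessing`) so the counterfactual optimization sees one differentiable callable. Below is a minimal sketch of that wrapper pattern with a toy classifier; this `Normalizer` is a simplified stand-in, not the repository's `utils.preprocessor.Normalizer`.

```python
import torch
import torch.nn as nn

class Normalizer(nn.Module):
    """Fold channel-wise normalization into the classifier itself."""
    def __init__(self, classifier, mean, std):
        super().__init__()
        self.classifier = classifier
        self.register_buffer("mean", torch.tensor(mean).view(1, -1, 1, 1))
        self.register_buffer("std", torch.tensor(std).view(1, -1, 1, 1))

    def forward(self, x):  # x in [0, 1], shape (B, C, H, W)
        return self.classifier((x - self.mean) / self.std)

toy_classifier = nn.Sequential(
    nn.Conv2d(3, 8, 3), nn.ReLU(),
    nn.AdaptiveAvgPool2d(1), nn.Flatten(), nn.Linear(8, 2),
)
model = Normalizer(toy_classifier, [0.5] * 3, [0.5] * 3).eval()
logits = model(torch.rand(1, 3, 64, 64))  # gradients flow through normalization
```

Keeping the normalization inside `forward` means gradient-based attacks or counterfactual searches can operate directly in the [0, 1] image space without re-implementing each classifier's preprocessing.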
cpuimage/minSDXLTF
stable_diffusion_xl/stable_diffusion_xl.py
[ { "identifier": "SimpleTokenizer", "path": "stable_diffusion_xl/clip_tokenizer.py", "snippet": "class SimpleTokenizer:\n def __init__(self, bpe_path=None):\n bpe_path = bpe_path or tf.keras.utils.get_file(\n \"bpe_simple_vocab_16e6.txt.gz\",\n \"https://github.com/openai/...
import numpy as np import tensorflow as tf from PIL import Image from scipy.ndimage import correlate1d from .clip_tokenizer import SimpleTokenizer from .diffusion_model import DiffusionXLModel from .image_decoder import ImageDecoder from .image_encoder import ImageEncoder from .long_prompt_weighting import get_weighted_text_embeddings from .scheduler import Scheduler from .text_encoder_laion import TextEncoderLaion, TextEncoderLaionProj from .text_encoder_openai import TextEncoderOpenAi
12,889
seed = int(seed) except: seed = None return tf.random.stateless_normal( (batch_size, self.img_height // 8, self.img_width // 8, 4), seed=[seed, seed], ) else: return tf.random.normal( (batch_size, self.img_height // 8, self.img_width // 8, 4) ) def _get_initial_diffusion_latent(self, batch_size, init_latent=None, init_time=None, seed=None, noise=None): if noise is None: noise = self._get_initial_diffusion_noise(batch_size, seed=seed) if init_latent is None: latent = noise else: latent = self.scheduler.signal_rates[init_time] * np.repeat(init_latent, batch_size, axis=0) + \ self.scheduler.noise_rates[init_time] * noise return latent @staticmethod def _get_pos_ids(): return np.asarray([list(range(MAX_PROMPT_LENGTH))], dtype=np.int32) class StableDiffusionXL(StableDiffusionXLBase): """Keras implementation of Stable Diffusion. Note that the StableDiffusionXL API, as well as the APIs of the sub-components of StableDiffusionXL (e.g. ImageEncoder, DiffusionModel) should be considered unstable at this point. We do not guarantee backwards compatibility for future changes to these APIs. Stable Diffusion is a powerful image generation model that can be used, among other things, to generate pictures according to a short text description (called a "prompt"). Arguments: img_height: int, height of the images to generate, in pixel. Note that only multiples of 128 are supported; the value provided will be rounded to the nearest valid value. Defaults to 1024. img_width: int, width of the images to generate, in pixel. Note that only multiples of 128 are supported; the value provided will be rounded to the nearest valid value. Defaults to 1024. jit_compile: bool, whether to compile the underlying models to XLA. This can lead to a significant speedup on some systems. Defaults to False. Example: ```python from stable_diffusion_xl.stable_diffusion_xl import StableDiffusionXL from PIL import Image model = StableDiffusionXL(img_height=1024, img_width=1024, jit_compile=True) img = model.text_to_image( prompt="A beautiful horse running through a field", batch_size=1, # How many images to generate at once num_steps=25, # Number of iterations (controls image quality) seed=123, # Set this to always get the same image from the same prompt ) Image.fromarray(img[0]).save("horse.png") print("saved at horse.png") ``` References: - [About Stable Diffusion](https://stability.ai/blog/stable-diffusion-announcement) - [Original implementation](https://github.com/CompVis/stable-diffusion) """ # noqa: E501 def __init__( self, img_height=1024, img_width=1024, jit_compile=True, unet_ckpt=None, text_encoder_ckpt=None, text_encoder2_ckpt=None, vae_ckpt=None, ): super().__init__(img_height, img_width, jit_compile) self.unet_ckpt = unet_ckpt self.text_encoder_ckpt = text_encoder_ckpt self.text_encoder2_ckpt = text_encoder2_ckpt self.vae_ckpt = vae_ckpt @property def text_encoder_openai(self): """text_encoder returns the text encoder with pretrained weights. Can be overridden for tasks like textual inversion where the text encoder needs to be modified. """ if self._text_encoder_openai is None: self._text_encoder_openai = TextEncoderOpenAi(MAX_PROMPT_LENGTH, ckpt_path=self.text_encoder_ckpt) if self.jit_compile: self._text_encoder_openai.compile(jit_compile=True) return self._text_encoder_openai @property def text_encoder_laion(self): """text_encoder returns the text encoder with pretrained weights. Can be overridden for tasks like textual inversion where the text encoder needs to be modified. """ if self._text_encoder_laion is None: self._text_encoder_laion = TextEncoderLaion(MAX_PROMPT_LENGTH, ckpt_path=self.text_encoder2_ckpt) if self.jit_compile: self._text_encoder_laion.compile(jit_compile=True) return self._text_encoder_laion @property def text_encoder_laion_proj(self): """text_encoder returns the text encoder with pretrained weights. Can be overridden for tasks like textual inversion where the text encoder needs to be modified. """ if self._text_encoder_laion_proj is None:
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Keras implementation of StableDiffusionXL.""" MAX_PROMPT_LENGTH = 77 class StableDiffusionXLBase: """Base class for stable diffusion xl model.""" def __init__(self, img_height=1024, img_width=1024, jit_compile=False, active_lcm=False): self.img_height = img_height self.img_width = img_width # lazy initialize the component models and the tokenizer self._image_encoder = None self._text_encoder_laion = None self._text_encoder_laion_proj = None self._text_encoder_openai = None self._diffusion_model = None self._image_decoder = None self._tokenizer = None self.jit_compile = jit_compile self.active_lcm = active_lcm self.scheduler = Scheduler(active_lcm=active_lcm) def text_to_image( self, prompt, negative_prompt=None, batch_size=1, num_steps=50, unconditional_guidance_scale=7.5, seed=None, original_size=None, crops_coords_top_left=(0, 0), target_size=None, guidance_rescale=0.7, callback=None): encoded_text, add_text_embeds = self.encode_text(prompt) return self.generate_image( encoded_text, add_text_embeds, negative_prompt=negative_prompt, batch_size=batch_size, num_steps=num_steps, unconditional_guidance_scale=unconditional_guidance_scale, seed=seed, original_size=original_size, crops_coords_top_left=crops_coords_top_left, target_size=target_size, guidance_rescale=guidance_rescale, callback=callback) def image_to_image( self, prompt, negative_prompt=None, batch_size=1, num_steps=50, unconditional_guidance_scale=7.5, seed=None, reference_image=None, reference_image_strength=0.8, original_size=None, crops_coords_top_left=(0, 0), target_size=None, guidance_rescale=0.7, callback=None): encoded_text, add_text_embeds = self.encode_text(prompt) return self.generate_image( encoded_text, add_text_embeds, negative_prompt=negative_prompt, batch_size=batch_size, num_steps=num_steps, unconditional_guidance_scale=unconditional_guidance_scale, seed=seed, reference_image=reference_image, reference_image_strength=reference_image_strength, original_size=original_size, crops_coords_top_left=crops_coords_top_left, target_size=target_size, guidance_rescale=guidance_rescale, callback=callback) def inpaint( self, prompt, negative_prompt=None, batch_size=1, num_steps=50, unconditional_guidance_scale=7.5, seed=None, reference_image=None, reference_image_strength=0.8, inpaint_mask=None, mask_blur_strength=None, original_size=None, crops_coords_top_left=(0, 0), target_size=None, guidance_rescale=0.7, callback=None): encoded_text, add_text_embeds = self.encode_text(prompt) return self.generate_image( encoded_text, add_text_embeds, negative_prompt=negative_prompt, batch_size=batch_size, num_steps=num_steps, unconditional_guidance_scale=unconditional_guidance_scale, seed=seed, reference_image=reference_image, reference_image_strength=reference_image_strength, inpaint_mask=inpaint_mask, mask_blur_strength=mask_blur_strength, original_size=original_size, crops_coords_top_left=crops_coords_top_left, target_size=target_size, guidance_rescale=guidance_rescale, callback=callback) def encode_text(self, prompt): """Encodes a prompt into a latent text encoding. The encoding produced by this method should be used as the `encoded_text` parameter of `StableDiffusion.generate_image`. Encoding text separately from generating an image can be used to arbitrarily modify the text encoding prior to image generation, e.g. for walking between two prompts. Args: prompt: a string to encode, must be 77 tokens or shorter. Example: ```python from keras_cv.models import StableDiffusion model = StableDiffusionXL(img_height=1024, img_width=1024, jit_compile=True) encoded_text = model.encode_text("Tacos at dawn") img = model.generate_image(encoded_text) ``` """ # Tokenize prompt (i.e. starting context) context_openai, _ = get_weighted_text_embeddings(self.tokenizer, self.text_encoder_openai, prompt, model_max_length=MAX_PROMPT_LENGTH, pad_token_id=49407) context_laion, add_text_embeds = get_weighted_text_embeddings(self.tokenizer, self.text_encoder_laion, prompt, model_max_length=MAX_PROMPT_LENGTH, pad_token_id=0, text_encoder_pool=self.text_encoder_laion_proj) return np.concatenate([context_openai, context_laion], axis=-1), add_text_embeds def gaussian_blur(self, image, radius=3, h_axis=1, v_axis=2): def build_filter1d(kernel_size): if kernel_size == 1: filter1d = [1] else: triangle = [[1, 1]] for i in range(1, kernel_size - 1): cur_row = [1] prev_row = triangle[i - 1] for j in range(len(prev_row) - 1): cur_row.append(prev_row[j] + prev_row[j + 1]) cur_row.append(1) triangle.append(cur_row) filter1d = triangle[-1] filter1d = np.reshape(filter1d, (kernel_size,)) return filter1d / np.sum(filter1d) weights = build_filter1d(radius) # Apply filter horizontally blurred_image = correlate1d(image, weights, axis=h_axis, output=None, mode="reflect", cval=0.0, origin=0) # Apply filter vertically blurred_image = correlate1d(blurred_image, weights, axis=v_axis, output=None, mode="reflect", cval=0.0, origin=0) return blurred_image @staticmethod def resize(image_array, new_h=None, new_w=None): h, w, c = image_array.shape if new_h == h and new_w == w: return image_array h_bounds = 0, h - 1 w_bounds = 0, w - 1 y = np.expand_dims(np.linspace(h_bounds[0], h_bounds[1], new_h), axis=-1) x = np.expand_dims(np.linspace(w_bounds[0], w_bounds[1], new_w), axis=0) # Calculate the floor and ceiling values of x and y x_floor = np.floor(x).astype(int) x_ceil = np.ceil(x).astype(int) y_floor = np.floor(y).astype(int) y_ceil = np.ceil(y).astype(int) # Clip the values to stay within the image bounds x_floor = np.clip(x_floor, w_bounds[0], w_bounds[1]) x_ceil = np.clip(x_ceil, w_bounds[0], w_bounds[1]) y_floor = np.clip(y_floor, h_bounds[0], h_bounds[1]) y_ceil = np.clip(y_ceil, h_bounds[0], h_bounds[1]) # Calculate the fractional part of x and y dx = x - x_floor dy = y - y_floor # Get the values of the four neighboring pixels dx = np.expand_dims(dx, axis=-1) dy = np.expand_dims(dy, axis=-1) q11 = image_array[y_floor, x_floor, :] q21 = image_array[y_floor, x_ceil, :] q12 = image_array[y_ceil, x_floor, :] q22 = image_array[y_ceil, x_ceil, :] # Perform bilinear interpolation top_interp = q11 * (1.0 - dx) + q21 * dx bottom_interp = q12 * (1.0 - dx) + q22 * dx interpolated = top_interp * (1.0 - dy) + bottom_interp * dy return interpolated def preprocessed_image(self, x): if type(x) is str: x = np.array(Image.open(x).convert("RGB")) else: x = np.asarray(x) image_array = self.resize(x, self.img_height, self.img_width) image_array = np.array(image_array, dtype=np.float32) / 255.0 input_image_array = image_array[None, ..., :3] input_image_tensor = input_image_array * 2.0 - 1.0 return input_image_array, input_image_tensor def preprocessed_mask(self, x, blur_radius=5): if type(x) is str: x = np.array(Image.open(x).convert("L")) else: x = np.asarray(x) if len(x.shape) == 2: x = np.expand_dims(x, axis=-1) mask_array = self.resize(x, self.img_height, self.img_width) if mask_array.shape[-1] != 1: mask_array = np.mean(mask_array, axis=-1, keepdims=True) input_mask_array = np.array(mask_array, dtype=np.float32) / 255.0 if blur_radius is not None: input_mask_array = self.gaussian_blur(input_mask_array, radius=blur_radius, h_axis=0, v_axis=1) latent_mask_tensor = self.resize(input_mask_array, self.img_width // 8, self.img_height // 8) return np.expand_dims(input_mask_array, axis=0), np.expand_dims(latent_mask_tensor, axis=0) def rescale_noise_cfg(self, noise_cfg, noise_pred_text, guidance_rescale=0.0, epsilon=1e-05): """ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/abs/2305.08891). See Section 3.4 """ std_text = np.std(noise_pred_text, axis=tuple(range(1, len(noise_pred_text.shape))), keepdims=True) std_cfg = np.std(noise_cfg, axis=tuple(range(1, len(noise_cfg.shape))), keepdims=True) + epsilon # rescale the results from guidance (fixes overexposure) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images noise_cfg = guidance_rescale * noise_pred_rescaled + (1.0 - guidance_rescale) * noise_cfg return noise_cfg def generate_image( self, encoded_text, add_text_embeds, negative_prompt=None, batch_size=1, num_steps=50, unconditional_guidance_scale=7.5, diffusion_noise=None, seed=None, inpaint_mask=None, mask_blur_strength=None, reference_image=None, reference_image_strength=0.8, callback=None, original_size=None, crops_coords_top_left=(0, 0), guidance_rescale=0.0, target_size=None): """Generates an image based on encoded text. The encoding passed to this method should be derived from `StableDiffusion.encode_text`. Args: encoded_text: Tensor of shape (`batch_size`, 77, 768), or a Tensor of shape (77, 768). When the batch axis is omitted, the same encoded text will be used to produce every generated image. batch_size: int, number of images to generate, defaults to 1. negative_prompt: a string containing information to negatively guide the image generation (e.g. by removing or altering certain aspects of the generated image), defaults to None. num_steps: int, number of diffusion steps (controls image quality), defaults to 50. unconditional_guidance_scale: float, controlling how closely the image should adhere to the prompt. Larger values result in more closely adhering to the prompt, but will make the image noisier. Defaults to 7.5. diffusion_noise: Tensor of shape (`batch_size`, img_height // 8, img_width // 8, 4), or a Tensor of shape (img_height // 8, img_width // 8, 4). Optional custom noise to seed the diffusion process. When the batch axis is omitted, the same noise will be used to seed diffusion for every generated image. seed: integer which is used to seed the random generation of diffusion noise, only to be specified if `diffusion_noise` is None. Example: ```python from stable_diffusion_xl.stable_diffusion_xl import StableDiffusionXL batch_size = 8 model = StableDiffusionXL(img_height=1024, img_width=1024, jit_compile=True) e_tacos = model.encode_text("Tacos at dawn") e_watermelons = model.encode_text("Watermelons at dusk") e_interpolated = tf.linspace(e_tacos, e_watermelons, batch_size) images = model.generate_image(e_interpolated, batch_size=batch_size) ``` """ if diffusion_noise is not None and seed is not None: raise ValueError( "`diffusion_noise` and `seed` should not both be passed to " "`generate_image`. `seed` is only used to generate diffusion " "noise when it's not already user-specified." ) context = self._expand_tensor(encoded_text, batch_size) if negative_prompt is None: negative_prompt = "" unconditional_context, unconditional_add_text_embeds = self.encode_text(negative_prompt) unconditional_context = self._expand_tensor(unconditional_context, batch_size) if diffusion_noise is not None: diffusion_noise = np.squeeze(diffusion_noise) if len(diffusion_noise.shape) == 3: diffusion_noise = np.repeat(np.expand_dims(diffusion_noise, axis=0), batch_size, axis=0) # Iterative reverse diffusion stage self.scheduler.set_timesteps(num_steps) timesteps = self.scheduler.timesteps[::-1] init_time = None init_latent = None input_image_array = None input_mask_array = None latent_mask_tensor = None if inpaint_mask is not None: input_mask_array, latent_mask_tensor = self.preprocessed_mask(inpaint_mask, mask_blur_strength) if input_mask_array is None or latent_mask_tensor is None: print("wrong inpaint mask:{}".format(inpaint_mask)) if reference_image is not None and (0. < reference_image_strength < 1.): input_image_array, input_image_tensor = self.preprocessed_image(reference_image) if input_image_tensor is not None: num_steps = int(num_steps * reference_image_strength + 0.5) init_time = timesteps[num_steps] init_latent = self.image_encoder.predict_on_batch(input_image_tensor) timesteps = timesteps[:num_steps] else: print("wrong reference image:{}".format(reference_image)) latent = self._get_initial_diffusion_latent(batch_size=batch_size, init_latent=init_latent, init_time=init_time, seed=seed, noise=diffusion_noise) progbar = tf.keras.utils.Progbar(len(timesteps)) iteration = 0 if original_size is None: original_size = [self.img_height, self.img_width] if target_size is None: target_size = [self.img_height, self.img_width] add_time_ids = tf.expand_dims( tf.convert_to_tensor(list(list(original_size) + list(crops_coords_top_left) + list(target_size)), latent.dtype), axis=0) for index, timestep in list(enumerate(timesteps))[::-1]: latent_prev = latent # Set aside the previous latent vector time_emb = np.repeat(np.reshape(timestep, [1, -1]), batch_size, axis=0) if unconditional_guidance_scale > 0.0: unconditional_latent = self.diffusion_model.predict_on_batch( [latent, time_emb, unconditional_context, add_time_ids, tf.zeros_like(add_text_embeds)]) latent_text = self.diffusion_model.predict_on_batch( [latent, time_emb, context, add_time_ids, add_text_embeds]) latent = unconditional_latent + unconditional_guidance_scale * ( latent_text - unconditional_latent) if guidance_rescale > 0.0: # Based on 3.4. in https://arxiv.org/abs/2305.08891 latent = self.rescale_noise_cfg(latent, latent_text, guidance_rescale=guidance_rescale) else: latent = self.diffusion_model.predict_on_batch( [latent, time_emb, context, add_time_ids, add_text_embeds]) latent = self.scheduler.step(latent, timestep, latent_prev) if latent_mask_tensor is not None and init_latent is not None: latent_orgin = self._get_initial_diffusion_latent(batch_size=batch_size, init_latent=init_latent, init_time=timestep, seed=seed, noise=diffusion_noise) latent = latent_orgin * (1. - latent_mask_tensor) + latent * latent_mask_tensor iteration += 1 if callback is not None: callback(iteration) progbar.update(iteration) # Decoding stage decoded = self.image_decoder.predict_on_batch(latent) decoded = np.array(((decoded + 1.) * 0.5), dtype=np.float32) if input_mask_array is not None and input_image_array is not None: decoded = input_image_array * (1. - input_mask_array) + decoded * input_mask_array return np.clip(decoded * 255., 0, 255).astype("uint8") def _expand_tensor(self, text_embedding, batch_size): """Extends a tensor by repeating it to fit the shape of the given batch size.""" text_embedding = np.squeeze(text_embedding) if len(text_embedding.shape) == 2: text_embedding = np.repeat( np.expand_dims(text_embedding, axis=0), batch_size, axis=0 ) return text_embedding @property def image_encoder(self): pass @property def text_encoder_openai(self): pass @property def text_encoder_laion(self): pass @property def text_encoder_laion_proj(self): pass @property def diffusion_model(self): pass @property def image_decoder(self): pass @property def tokenizer(self): """tokenizer returns the tokenizer used for text inputs. Can be overridden for tasks like textual inversion where the tokenizer needs to be modified. """ if self._tokenizer is None: self._tokenizer = SimpleTokenizer() return self._tokenizer def _get_initial_diffusion_noise(self, batch_size, seed): if seed is not None: try: seed = int(seed) except: seed = None return tf.random.stateless_normal( (batch_size, self.img_height // 8, self.img_width // 8, 4), seed=[seed, seed], ) else: return tf.random.normal( (batch_size, self.img_height // 8, self.img_width // 8, 4) ) def _get_initial_diffusion_latent(self, batch_size, init_latent=None, init_time=None, seed=None, noise=None): if noise is None: noise = self._get_initial_diffusion_noise(batch_size, seed=seed) if init_latent is None: latent = noise else: latent = self.scheduler.signal_rates[init_time] * np.repeat(init_latent, batch_size, axis=0) + \ self.scheduler.noise_rates[init_time] * noise return latent @staticmethod def _get_pos_ids(): return np.asarray([list(range(MAX_PROMPT_LENGTH))], dtype=np.int32) class StableDiffusionXL(StableDiffusionXLBase): """Keras implementation of Stable Diffusion. Note that the StableDiffusionXL API, as well as the APIs of the sub-components of StableDiffusionXL (e.g. ImageEncoder, DiffusionModel) should be considered unstable at this point. We do not guarantee backwards compatibility for future changes to these APIs. Stable Diffusion is a powerful image generation model that can be used, among other things, to generate pictures according to a short text description (called a "prompt"). Arguments: img_height: int, height of the images to generate, in pixel. Note that only multiples of 128 are supported; the value provided will be rounded to the nearest valid value. Defaults to 1024. img_width: int, width of the images to generate, in pixel. Note that only multiples of 128 are supported; the value provided will be rounded to the nearest valid value. Defaults to 1024. jit_compile: bool, whether to compile the underlying models to XLA. This can lead to a significant speedup on some systems. Defaults to False. Example: ```python from stable_diffusion_xl.stable_diffusion_xl import StableDiffusionXL from PIL import Image model = StableDiffusionXL(img_height=1024, img_width=1024, jit_compile=True) img = model.text_to_image( prompt="A beautiful horse running through a field", batch_size=1, # How many images to generate at once num_steps=25, # Number of iterations (controls image quality) seed=123, # Set this to always get the same image from the same prompt ) Image.fromarray(img[0]).save("horse.png") print("saved at horse.png") ``` References: - [About Stable Diffusion](https://stability.ai/blog/stable-diffusion-announcement) - [Original implementation](https://github.com/CompVis/stable-diffusion) """ # noqa: E501 def __init__( self, img_height=1024, img_width=1024, jit_compile=True, unet_ckpt=None, text_encoder_ckpt=None, text_encoder2_ckpt=None, vae_ckpt=None, ): super().__init__(img_height, img_width, jit_compile) self.unet_ckpt = unet_ckpt self.text_encoder_ckpt = text_encoder_ckpt self.text_encoder2_ckpt = text_encoder2_ckpt self.vae_ckpt = vae_ckpt @property def text_encoder_openai(self): """text_encoder returns the text encoder with pretrained weights. Can be overridden for tasks like textual inversion where the text encoder needs to be modified. """ if self._text_encoder_openai is None: self._text_encoder_openai = TextEncoderOpenAi(MAX_PROMPT_LENGTH, ckpt_path=self.text_encoder_ckpt) if self.jit_compile: self._text_encoder_openai.compile(jit_compile=True) return self._text_encoder_openai @property def text_encoder_laion(self): """text_encoder returns the text encoder with pretrained weights. Can be overridden for tasks like textual inversion where the text encoder needs to be modified. """ if self._text_encoder_laion is None: self._text_encoder_laion = TextEncoderLaion(MAX_PROMPT_LENGTH, ckpt_path=self.text_encoder2_ckpt) if self.jit_compile: self._text_encoder_laion.compile(jit_compile=True) return self._text_encoder_laion @property def text_encoder_laion_proj(self): """text_encoder returns the text encoder with pretrained weights. Can be overridden for tasks like textual inversion where the text encoder needs to be modified. """ if self._text_encoder_laion_proj is None:
self._text_encoder_laion_proj = TextEncoderLaionProj(ckpt_path=self.text_encoder2_ckpt)
7
2023-10-14 18:40:16+00:00
16k
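The `rescale_noise_cfg` method in the record above implements the guidance-rescale fix from Section 3.4 of arXiv:2305.08891. Below is a standalone numpy sketch of classifier-free guidance plus that rescale step, mirroring the formula visible in the record; the array shapes and the `guided_noise` name are made up for illustration.

```python
import numpy as np

def guided_noise(uncond, text, scale=7.5, guidance_rescale=0.7, eps=1e-5):
    cfg = uncond + scale * (text - uncond)         # plain CFG combination
    axes = tuple(range(1, cfg.ndim))               # per-sample statistics
    std_text = text.std(axis=axes, keepdims=True)
    std_cfg = cfg.std(axis=axes, keepdims=True) + eps
    rescaled = cfg * (std_text / std_cfg)          # fix overexposure
    # blend back by guidance_rescale to avoid "plain looking" images
    return guidance_rescale * rescaled + (1.0 - guidance_rescale) * cfg

rng = np.random.default_rng(0)
uncond = rng.normal(size=(2, 8, 8, 4))
text = rng.normal(size=(2, 8, 8, 4))
print(guided_noise(uncond, text).shape)  # (2, 8, 8, 4)
```

The rescale matches the guided prediction's per-sample standard deviation to that of the text-conditioned prediction, which is why the record only applies it when `guidance_rescale > 0.0`.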
spla-tam/SplaTAM
scripts/iphone_demo.py
[ { "identifier": "relative_transformation", "path": "datasets/gradslam_datasets/geometryutils.py", "snippet": "def relative_transformation(\n trans_01: torch.Tensor, trans_02: torch.Tensor, orthogonal_rotations: bool = False\n) -> torch.Tensor:\n r\"\"\"Function that computes the relative homogenou...
import argparse import os import shutil import sys import time import json import cv2 import matplotlib.pyplot as plt import numpy as np import torch import torch.nn.functional as F import cyclonedds.idl as idl import cyclonedds.idl.annotations as annotate import cyclonedds.idl.types as types from pathlib import Path from importlib.machinery import SourceFileLoader from tqdm import tqdm from datasets.gradslam_datasets.geometryutils import relative_transformation from utils.common_utils import seed_everything, save_params_ckpt, save_params from utils.eval_helpers import report_progress from utils.keyframe_selection import keyframe_selection_overlap from utils.recon_helpers import setup_camera from utils.slam_external import build_rotation, prune_gaussians, densify from scripts.splatam import get_loss, initialize_optimizer, initialize_params, initialize_camera_pose, get_pointcloud, add_new_gaussians from diff_gaussian_rasterization import GaussianRasterizer as Renderer from dataclasses import dataclass from cyclonedds.domain import DomainParticipant, Domain from cyclonedds.core import Qos, Policy from cyclonedds.sub import DataReader from cyclonedds.topic import Topic from cyclonedds.util import duration
13,634
curr_gt_w2c = gt_w2c_all_frames curr_data = {'cam': cam, 'im': color, 'depth':depth, 'id': iter_time_idx, 'intrinsics': intrinsics, 'w2c': first_frame_w2c, 'iter_gt_w2c_list': curr_gt_w2c} tracking_curr_data = curr_data # Optimization Iterations num_iters_mapping = config['mapping']['num_iters'] # Initialize the camera pose for the current frame if time_idx > 0: params = initialize_camera_pose(params, time_idx, forward_prop=config['tracking']['forward_prop']) # Tracking tracking_start_time = time.time() if time_idx > 0 and not config['tracking']['use_gt_poses']: # Reset Optimizer & Learning Rates for tracking optimizer = initialize_optimizer(params, config['tracking']['lrs'], tracking=True) # Keep Track of Best Candidate Rotation & Translation candidate_cam_unnorm_rot = params['cam_unnorm_rots'][..., time_idx].detach().clone() candidate_cam_tran = params['cam_trans'][..., time_idx].detach().clone() current_min_loss = float(1e20) # Tracking Optimization iter = 0 do_continue_slam = False num_iters_tracking = config['tracking']['num_iters'] progress_bar = tqdm(range(num_iters_tracking), desc=f"Tracking Time Step: {time_idx}") while True: iter_start_time = time.time() # Loss for current frame loss, variables, losses = get_loss(params, tracking_curr_data, variables, iter_time_idx, config['tracking']['loss_weights'], config['tracking']['use_sil_for_loss'], config['tracking']['sil_thres'], config['tracking']['use_l1'], config['tracking']['ignore_outlier_depth_loss'], tracking=True, visualize_tracking_loss=config['tracking']['visualize_tracking_loss'], tracking_iteration=iter) # Backprop loss.backward() # Optimizer Update optimizer.step() optimizer.zero_grad(set_to_none=True) with torch.no_grad(): # Save the best candidate rotation & translation if loss < current_min_loss: current_min_loss = loss candidate_cam_unnorm_rot = params['cam_unnorm_rots'][..., time_idx].detach().clone() candidate_cam_tran = params['cam_trans'][..., time_idx].detach().clone() # Report Progress if config['report_iter_progress']: report_progress(params, tracking_curr_data, iter+1, progress_bar, iter_time_idx, sil_thres=config['tracking']['sil_thres'], tracking=True) else: progress_bar.update(1) # Update the runtime numbers iter_end_time = time.time() tracking_iter_time_sum += iter_end_time - iter_start_time tracking_iter_time_count += 1 # Check if we should stop tracking iter += 1 if iter == num_iters_tracking: if losses['depth'] < config['tracking']['depth_loss_thres'] and config['tracking']['use_depth_loss_thres']: break elif config['tracking']['use_depth_loss_thres'] and not do_continue_slam: do_continue_slam = True progress_bar = tqdm(range(num_iters_tracking), desc=f"Tracking Time Step: {time_idx}") num_iters_tracking = 2*num_iters_tracking else: break progress_bar.close() # Copy over the best candidate rotation & translation with torch.no_grad(): params['cam_unnorm_rots'][..., time_idx] = candidate_cam_unnorm_rot params['cam_trans'][..., time_idx] = candidate_cam_tran elif time_idx > 0 and config['tracking']['use_gt_poses']: with torch.no_grad(): # Get the ground truth pose relative to frame 0 rel_w2c = curr_gt_w2c[-1] rel_w2c_rot = rel_w2c[:3, :3].unsqueeze(0).detach() rel_w2c_rot_quat = matrix_to_quaternion(rel_w2c_rot) rel_w2c_tran = rel_w2c[:3, 3].detach() # Update the camera parameters params['cam_unnorm_rots'][..., time_idx] = rel_w2c_rot_quat params['cam_trans'][..., time_idx] = rel_w2c_tran # Update the runtime numbers tracking_end_time = time.time() tracking_frame_time_sum += tracking_end_time - tracking_start_time tracking_frame_time_count += 1 if time_idx == 0 or (time_idx+1) % config['report_global_progress_every'] == 0: try: # Report Final Tracking Progress progress_bar = tqdm(range(1), desc=f"Tracking Result Time Step: {time_idx}") with torch.no_grad(): report_progress(params, tracking_curr_data, 1, progress_bar, iter_time_idx, sil_thres=config['tracking']['sil_thres'], tracking=True) progress_bar.close() except: ckpt_output_dir = save_path.joinpath("checkpoints") os.makedirs(ckpt_output_dir, exist_ok=True) save_params_ckpt(params, ckpt_output_dir, time_idx) print('Failed to evaluate trajectory.') # Densification & KeyFrame-based Mapping if time_idx == 0 or (time_idx+1) % config['map_every'] == 0: # Densification if config['mapping']['add_new_gaussians'] and time_idx > 0: densify_curr_data = {'cam': densify_cam, 'im': densify_color, 'depth': densify_depth, 'id': time_idx, 'intrinsics': densify_intrinsics, 'w2c': first_frame_w2c, 'iter_gt_w2c_list': curr_gt_w2c} # Add new Gaussians to the scene based on the Silhouette params, variables = add_new_gaussians(params, variables, densify_curr_data, config['mapping']['sil_thres'], time_idx, config['mean_sq_dist_method']) with torch.no_grad(): # Get the current estimated rotation & translation curr_cam_rot = F.normalize(params['cam_unnorm_rots'][..., time_idx].detach()) curr_cam_tran = params['cam_trans'][..., time_idx].detach() curr_w2c = torch.eye(4).cuda().float() curr_w2c[:3, :3] = build_rotation(curr_cam_rot) curr_w2c[:3, 3] = curr_cam_tran # Select Keyframes for Mapping num_keyframes = config['mapping_window_size']-2
""" Script to stream RGB-D data from the NeRFCapture iOS App & build a Gaussian Splat on the fly using SplaTAM. The CycloneDDS parts of this script are adapted from the Instant-NGP Repo: https://github.com/NVlabs/instant-ngp/blob/master/scripts/nerfcapture2nerf.py """ #!/usr/bin/env python3 _BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.insert(0, _BASE_DIR) def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--config", default="./configs/iphone/online_demo.py", type=str, help="Path to config file.") return parser.parse_args() # DDS # ================================================================================================== @dataclass @annotate.final @annotate.autoid("sequential") class SplatCaptureFrame(idl.IdlStruct, typename="SplatCaptureData.SplatCaptureFrame"): id: types.uint32 annotate.key("id") timestamp: types.float64 fl_x: types.float32 fl_y: types.float32 cx: types.float32 cy: types.float32 transform_matrix: types.array[types.float32, 16] width: types.uint32 height: types.uint32 image: types.sequence[types.uint8] has_depth: bool depth_width: types.uint32 depth_height: types.uint32 depth_scale: types.float32 depth_image: types.sequence[types.uint8] dds_config = """<?xml version="1.0" encoding="UTF-8" ?> \ <CycloneDDS xmlns="https://cdds.io/config" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://cdds.io/config https://raw.githubusercontent.com/eclipse-cyclonedds/cyclonedds/master/etc/cyclonedds.xsd"> \ <Domain id="any"> \ <Internal> \ <MinimumSocketReceiveBufferSize>10MB</MinimumSocketReceiveBufferSize> \ </Internal> \ <Tracing> \ <Verbosity>config</Verbosity> \ <OutputFile>stdout</OutputFile> \ </Tracing> \ </Domain> \ </CycloneDDS> \ """ # ================================================================================================== def dataset_capture_loop(reader: DataReader, save_path: Path, overwrite: bool, n_frames: int, depth_scale: float, config: dict): rgb_path = save_path.joinpath("rgb") if rgb_path.exists(): if overwrite: # Prompt user to confirm deletion if (input(f"warning! folder '{save_path}' will be deleted/replaced. continue? (Y/n)").lower().strip()+"y")[:1] != "y": sys.exit(1) shutil.rmtree(save_path) else: print(f"rgb_path {rgb_path} already exists. 
Please use overwrite=True in config if you want to overwrite.") sys.exit(1) print("Waiting for frames...") # Make directory images_dir = save_path.joinpath("rgb") manifest = { "fl_x": 0.0, "fl_y": 0.0, "cx": 0.0, "cy": 0.0, "w": 0.0, "h": 0.0, "frames": [] } total_frames = 0 # Total frames received time_idx = total_frames num_frames = n_frames # Total frames desired # Initialize list to keep track of Keyframes keyframe_list = [] keyframe_time_indices = [] # Init Variables to keep track of ARkit poses and runtimes gt_w2c_all_frames = [] tracking_iter_time_sum = 0 tracking_iter_time_count = 0 mapping_iter_time_sum = 0 mapping_iter_time_count = 0 tracking_frame_time_sum = 0 tracking_frame_time_count = 0 mapping_frame_time_sum = 0 mapping_frame_time_count = 0 P = torch.tensor( [ [1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1] ] ).float() # Start DDS Loop while True: sample = reader.read_next() # Get frame from NeRFCapture if sample: print(f"{total_frames + 1}/{n_frames} frames received") if total_frames == 0: save_path.mkdir(parents=True, exist_ok=True) images_dir.mkdir(exist_ok=True) manifest["w"] = sample.width manifest["h"] = sample.height manifest["cx"] = sample.cx manifest["cy"] = sample.cy manifest["fl_x"] = sample.fl_x manifest["fl_y"] = sample.fl_y manifest["integer_depth_scale"] = float(depth_scale)/65535.0 if sample.has_depth: depth_dir = save_path.joinpath("depth") depth_dir.mkdir(exist_ok=True) # RGB image = np.asarray(sample.image, dtype=np.uint8).reshape((sample.height, sample.width, 3)) cv2.imwrite(str(images_dir.joinpath(f"{total_frames}.png")), cv2.cvtColor(image, cv2.COLOR_RGB2BGR)) # Depth if avaiable save_depth = None if sample.has_depth: # Save Depth Image save_depth = np.asarray(sample.depth_image, dtype=np.uint8).view( dtype=np.float32).reshape((sample.depth_height, sample.depth_width)) save_depth = (save_depth*65535/float(depth_scale)).astype(np.uint16) save_depth = cv2.resize(save_depth, dsize=( sample.width, sample.height), interpolation=cv2.INTER_NEAREST) cv2.imwrite(str(depth_dir.joinpath(f"{total_frames}.png")), save_depth) # Load Depth Image for SplaTAM curr_depth = np.asarray(sample.depth_image, dtype=np.uint8).view( dtype=np.float32).reshape((sample.depth_height, sample.depth_width)) else: print("No Depth Image Received. Please make sure that the NeRFCapture App \ mentions Depth Supported on the top right corner. 
Skipping Frame...") continue # ARKit Poses for saving dataset X_WV = np.asarray(sample.transform_matrix, dtype=np.float32).reshape((4, 4)).T frame = { "transform_matrix": X_WV.tolist(), "file_path": f"rgb/{total_frames}.png", "fl_x": sample.fl_x, "fl_y": sample.fl_y, "cx": sample.cx, "cy": sample.cy, "w": sample.width, "h": sample.height } if save_depth is not None: frame["depth_path"] = f"depth/{total_frames}.png" manifest["frames"].append(frame) # Convert ARKit Pose to GradSLAM format gt_pose = torch.from_numpy(X_WV).float() gt_pose = P @ gt_pose @ P.T if time_idx == 0: first_abs_gt_pose = gt_pose gt_pose = relative_transformation(first_abs_gt_pose.unsqueeze(0), gt_pose.unsqueeze(0), orthogonal_rotations=False) gt_w2c = torch.linalg.inv(gt_pose[0]) gt_w2c_all_frames.append(gt_w2c) # Initialize Tracking & Mapping Resolution Data color = cv2.resize(image, dsize=( config['data']['desired_image_width'], config['data']['desired_image_height']), interpolation=cv2.INTER_LINEAR) depth = cv2.resize(curr_depth, dsize=( config['data']['desired_image_width'], config['data']['desired_image_height']), interpolation=cv2.INTER_NEAREST) depth = np.expand_dims(depth, -1) color = torch.from_numpy(color).cuda().float() color = color.permute(2, 0, 1) / 255 depth = torch.from_numpy(depth).cuda().float() depth = depth.permute(2, 0, 1) if time_idx == 0: intrinsics = torch.tensor([[sample.fl_x, 0, sample.cx], [0, sample.fl_y, sample.cy], [0, 0, 1]]).cuda().float() intrinsics = intrinsics / config['data']['downscale_factor'] intrinsics[2, 2] = 1.0 first_frame_w2c = torch.eye(4).cuda().float() cam = setup_camera(color.shape[2], color.shape[1], intrinsics.cpu().numpy(), first_frame_w2c.cpu().numpy()) # Initialize Densification Resolution Data densify_color = cv2.resize(image, dsize=( config['data']['densification_image_width'], config['data']['densification_image_height']), interpolation=cv2.INTER_LINEAR) densify_depth = cv2.resize(curr_depth, dsize=( config['data']['densification_image_width'], config['data']['densification_image_height']), interpolation=cv2.INTER_NEAREST) densify_depth = np.expand_dims(densify_depth, -1) densify_color = torch.from_numpy(densify_color).cuda().float() densify_color = densify_color.permute(2, 0, 1) / 255 densify_depth = torch.from_numpy(densify_depth).cuda().float() densify_depth = densify_depth.permute(2, 0, 1) if time_idx == 0: densify_intrinsics = torch.tensor([[sample.fl_x, 0, sample.cx], [0, sample.fl_y, sample.cy], [0, 0, 1]]).cuda().float() densify_intrinsics = densify_intrinsics / config['data']['densify_downscale_factor'] densify_intrinsics[2, 2] = 1.0 densify_cam = setup_camera(densify_color.shape[2], densify_color.shape[1], densify_intrinsics.cpu().numpy(), first_frame_w2c.cpu().numpy()) # Initialize Params for first time step if time_idx == 0: # Get Initial Point Cloud mask = (densify_depth > 0) # Mask out invalid depth values mask = mask.reshape(-1) init_pt_cld, mean3_sq_dist = get_pointcloud(densify_color, densify_depth, densify_intrinsics, first_frame_w2c, mask=mask, compute_mean_sq_dist=True, mean_sq_dist_method=config['mean_sq_dist_method']) params, variables = initialize_params(init_pt_cld, num_frames, mean3_sq_dist) variables['scene_radius'] = torch.max(densify_depth)/config['scene_radius_depth_ratio'] # Initialize Mapping & Tracking for current frame iter_time_idx = time_idx curr_gt_w2c = gt_w2c_all_frames curr_data = {'cam': cam, 'im': color, 'depth':depth, 'id': iter_time_idx, 'intrinsics': intrinsics, 'w2c': first_frame_w2c, 'iter_gt_w2c_list': curr_gt_w2c} 
tracking_curr_data = curr_data # Optimization Iterations num_iters_mapping = config['mapping']['num_iters'] # Initialize the camera pose for the current frame if time_idx > 0: params = initialize_camera_pose(params, time_idx, forward_prop=config['tracking']['forward_prop']) # Tracking tracking_start_time = time.time() if time_idx > 0 and not config['tracking']['use_gt_poses']: # Reset Optimizer & Learning Rates for tracking optimizer = initialize_optimizer(params, config['tracking']['lrs'], tracking=True) # Keep Track of Best Candidate Rotation & Translation candidate_cam_unnorm_rot = params['cam_unnorm_rots'][..., time_idx].detach().clone() candidate_cam_tran = params['cam_trans'][..., time_idx].detach().clone() current_min_loss = float(1e20) # Tracking Optimization iter = 0 do_continue_slam = False num_iters_tracking = config['tracking']['num_iters'] progress_bar = tqdm(range(num_iters_tracking), desc=f"Tracking Time Step: {time_idx}") while True: iter_start_time = time.time() # Loss for current frame loss, variables, losses = get_loss(params, tracking_curr_data, variables, iter_time_idx, config['tracking']['loss_weights'], config['tracking']['use_sil_for_loss'], config['tracking']['sil_thres'], config['tracking']['use_l1'], config['tracking']['ignore_outlier_depth_loss'], tracking=True, visualize_tracking_loss=config['tracking']['visualize_tracking_loss'], tracking_iteration=iter) # Backprop loss.backward() # Optimizer Update optimizer.step() optimizer.zero_grad(set_to_none=True) with torch.no_grad(): # Save the best candidate rotation & translation if loss < current_min_loss: current_min_loss = loss candidate_cam_unnorm_rot = params['cam_unnorm_rots'][..., time_idx].detach().clone() candidate_cam_tran = params['cam_trans'][..., time_idx].detach().clone() # Report Progress if config['report_iter_progress']: report_progress(params, tracking_curr_data, iter+1, progress_bar, iter_time_idx, sil_thres=config['tracking']['sil_thres'], tracking=True) else: progress_bar.update(1) # Update the runtime numbers iter_end_time = time.time() tracking_iter_time_sum += iter_end_time - iter_start_time tracking_iter_time_count += 1 # Check if we should stop tracking iter += 1 if iter == num_iters_tracking: if losses['depth'] < config['tracking']['depth_loss_thres'] and config['tracking']['use_depth_loss_thres']: break elif config['tracking']['use_depth_loss_thres'] and not do_continue_slam: do_continue_slam = True progress_bar = tqdm(range(num_iters_tracking), desc=f"Tracking Time Step: {time_idx}") num_iters_tracking = 2*num_iters_tracking else: break progress_bar.close() # Copy over the best candidate rotation & translation with torch.no_grad(): params['cam_unnorm_rots'][..., time_idx] = candidate_cam_unnorm_rot params['cam_trans'][..., time_idx] = candidate_cam_tran elif time_idx > 0 and config['tracking']['use_gt_poses']: with torch.no_grad(): # Get the ground truth pose relative to frame 0 rel_w2c = curr_gt_w2c[-1] rel_w2c_rot = rel_w2c[:3, :3].unsqueeze(0).detach() rel_w2c_rot_quat = matrix_to_quaternion(rel_w2c_rot) rel_w2c_tran = rel_w2c[:3, 3].detach() # Update the camera parameters params['cam_unnorm_rots'][..., time_idx] = rel_w2c_rot_quat params['cam_trans'][..., time_idx] = rel_w2c_tran # Update the runtime numbers tracking_end_time = time.time() tracking_frame_time_sum += tracking_end_time - tracking_start_time tracking_frame_time_count += 1 if time_idx == 0 or (time_idx+1) % config['report_global_progress_every'] == 0: try: # Report Final Tracking Progress progress_bar = tqdm(range(1), 
desc=f"Tracking Result Time Step: {time_idx}") with torch.no_grad(): report_progress(params, tracking_curr_data, 1, progress_bar, iter_time_idx, sil_thres=config['tracking']['sil_thres'], tracking=True) progress_bar.close() except: ckpt_output_dir = save_path.joinpath("checkpoints") os.makedirs(ckpt_output_dir, exist_ok=True) save_params_ckpt(params, ckpt_output_dir, time_idx) print('Failed to evaluate trajectory.') # Densification & KeyFrame-based Mapping if time_idx == 0 or (time_idx+1) % config['map_every'] == 0: # Densification if config['mapping']['add_new_gaussians'] and time_idx > 0: densify_curr_data = {'cam': densify_cam, 'im': densify_color, 'depth': densify_depth, 'id': time_idx, 'intrinsics': densify_intrinsics, 'w2c': first_frame_w2c, 'iter_gt_w2c_list': curr_gt_w2c} # Add new Gaussians to the scene based on the Silhouette params, variables = add_new_gaussians(params, variables, densify_curr_data, config['mapping']['sil_thres'], time_idx, config['mean_sq_dist_method']) with torch.no_grad(): # Get the current estimated rotation & translation curr_cam_rot = F.normalize(params['cam_unnorm_rots'][..., time_idx].detach()) curr_cam_tran = params['cam_trans'][..., time_idx].detach() curr_w2c = torch.eye(4).cuda().float() curr_w2c[:3, :3] = build_rotation(curr_cam_rot) curr_w2c[:3, 3] = curr_cam_tran # Select Keyframes for Mapping num_keyframes = config['mapping_window_size']-2
selected_keyframes = keyframe_selection_overlap(depth, curr_w2c, intrinsics, keyframe_list[:-1], num_keyframes)
5
2023-11-30 20:26:47+00:00
16k
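The pose handling inside the record above is its densest step: each ARKit camera-to-world matrix X_WV is conjugated by an axis-flip P (P @ X_WV @ P.T), re-expressed relative to the first frame, and finally inverted into world-to-camera form for tracking. The sketch below isolates just that conversion. It is an illustration, not the repo's code: the inline inv(first_pose) @ pose is an assumed stand-in for gradslam's relative_transformation with orthogonal_rotations=False, and the identity inputs are hypothetical.

import torch

# Axis flip between ARKit's camera convention (x right, y up, z backward)
# and the OpenCV-style convention (x right, y down, z forward) used downstream.
P = torch.tensor([
    [1.0,  0.0,  0.0, 0.0],
    [0.0, -1.0,  0.0, 0.0],
    [0.0,  0.0, -1.0, 0.0],
    [0.0,  0.0,  0.0, 1.0],
])

def arkit_to_w2c(X_WV: torch.Tensor, first_pose: torch.Tensor) -> torch.Tensor:
    """ARKit camera-to-world pose -> world-to-camera pose relative to frame 0."""
    gt_pose = P @ X_WV @ P.T                           # change of basis on both sides
    rel_pose = torch.linalg.inv(first_pose) @ gt_pose  # assumed relative_transformation
    return torch.linalg.inv(rel_pose)                  # camera-to-world -> world-to-camera

first = P @ torch.eye(4) @ P.T    # hypothetical frame-0 pose (identity in ARKit coords)
print(arkit_to_w2c(torch.eye(4), first))   # frame 0 relative to itself: identity

Conjugating by P rather than multiplying on one side alone flips the y and z axes of both the world and the camera frame, so rotations and translations land consistently in the convention used downstream.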
zhyever/PatchFusion
ControlNet/ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ControlNet/ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\...
import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import itertools from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager, nullcontext from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.distributed import rank_zero_only from omegaconf import ListConfig from ControlNet.ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ControlNet.ldm.modules.ema import LitEma from ControlNet.ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from ControlNet.ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL from ControlNet.ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like from ControlNet.ldm.models.diffusion.ddim import DDIMSampler
12,548
assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None):
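Both ddpm.py records in this file carry this same ancestral-sampling step, and its least obvious line is nonzero_mask, which zeroes the injected noise exactly when t == 0 so the final reverse step returns the predicted mean unperturbed. A minimal sketch of that step alone, with hypothetical mean and log-variance tensors (not either repo's code):

import torch

def p_sample_step(model_mean, model_log_variance, t):
    # One reverse-diffusion draw: mean + sigma * z, with sigma = exp(0.5 * log_var),
    # and the noise term suppressed for batch elements at t == 0.
    b = model_mean.shape[0]
    noise = torch.randn_like(model_mean)
    nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (model_mean.dim() - 1)))
    return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise

mean = torch.zeros(2, 3, 4, 4)
log_var = torch.full_like(mean, -2.0)
t = torch.tensor([0, 500])
out = p_sample_step(mean, log_var, t)
print(out[0].abs().max(), out[1].abs().max())  # first element is exactly the mean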
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if reset_ema: assert exists(ckpt_path) if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) if reset_ema: assert self.use_ema print(f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) else: self.register_buffer('logvar', logvar) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
""" mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None):
noise = default(noise, lambda: torch.randn_like(x_start))
2
2023-12-04 08:43:15+00:00
16k
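Every schedule buffer registered in the record above (sqrt_alphas_cumprod, sqrt_one_minus_alphas_cumprod, and friends) exists to serve the closed-form forward process x_t = sqrt(abar_t) * x_0 + sqrt(1 - abar_t) * eps. A self-contained numeric sketch follows; it mirrors the buffer and helper names but is a toy, not code from either repository.

import torch

T = 1000
betas = torch.linspace(1e-4, 2e-2, T)              # the "linear" schedule from register_schedule
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

def extract_into_tensor(a, t, x_shape):
    # Gather one scalar per batch element and reshape so it broadcasts over images.
    return a.gather(-1, t).reshape(t.shape[0], *((1,) * (len(x_shape) - 1)))

def q_sample(x_start, t, noise):
    # x_t = sqrt(abar_t) * x_0 + sqrt(1 - abar_t) * eps
    return (
        extract_into_tensor(alphas_cumprod.sqrt(), t, x_start.shape) * x_start
        + extract_into_tensor((1.0 - alphas_cumprod).sqrt(), t, x_start.shape) * noise
    )

x0 = torch.randn(2, 3, 8, 8)
t = torch.tensor([10, 900])
xt = q_sample(x0, t, torch.randn_like(x0))
print((xt[0] - x0[0]).abs().mean(), (xt[1] - x0[1]).abs().mean())  # small vs. large

At t = 10 the sample barely departs from x_0; at t = 900 the signal is almost gone, which is why a single network conditioned on t can be trained across all noise levels at once.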
baaivision/GeoDream
extern/ldm_zero123/models/diffusion/ddpm.py
[ { "identifier": "AutoencoderKL", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(\n self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"i...
import itertools import numpy as np import pytorch_lightning as pl import torch import torch.nn as nn from contextlib import contextmanager, nullcontext from functools import partial from einops import rearrange, repeat from omegaconf import ListConfig from pytorch_lightning.utilities.rank_zero import rank_zero_only from torch.optim.lr_scheduler import LambdaLR from torchvision.utils import make_grid from tqdm import tqdm from extern.ldm_zero123.models.autoencoder import ( AutoencoderKL, IdentityFirstStage, VQModelInterface, ) from extern.ldm_zero123.models.diffusion.ddim import DDIMSampler from extern.ldm_zero123.modules.attention import CrossAttention from extern.ldm_zero123.modules.diffusionmodules.util import ( extract_into_tensor, make_beta_schedule, noise_like, ) from extern.ldm_zero123.modules.distributions.distributions import ( DiagonalGaussianDistribution, normal_kl, ) from extern.ldm_zero123.modules.ema import LitEma from extern.ldm_zero123.util import ( count_params, default, exists, instantiate_from_config, isimage, ismap, log_txt_as_img, mean_flat, )
12,126
): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: # todo: get null label from cond_stage_model raise NotImplementedError() c = repeat(c, "1 ... -> b ...", b=batch_size).to(self.device) cond = {} cond["c_crossattn"] = [c] cond["c_concat"] = [ torch.zeros([batch_size, 4, image_size // 8, image_size // 8]).to( self.device ) ] return cond @torch.no_grad() def log_images( self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1.0, return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1.0, unconditional_guidance_label=None, use_ema_scope=True, **kwargs, ): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input( batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N, ) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img( (x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25, ) log["conditioning"] = xc elif self.cond_stage_key == "class_label": xc = log_txt_as_img( (x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25, ) log["conditioning"] = xc elif isimage(xc): log["conditioning"] = xc if ismap(xc): log["original_conditioning"] = self.to_rgb(xc) if plot_diffusion_rows: # get diffusion row diffusion_row = list() z_start = z[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), "1 -> b", b=n_row) t = t.to(self.device).long() noise = torch.randn_like(z_start) z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) diffusion_row.append(self.decode_first_stage(z_noisy)) diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W diffusion_grid = rearrange(diffusion_row, "n b c h w -> b n c h w") diffusion_grid = rearrange(diffusion_grid, "b n c h w -> (b n) c h w") diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) log["diffusion_row"] = diffusion_grid if sample: # get denoise row with ema_scope("Sampling"): samples, z_denoise_row = self.sample_log( cond=c, batch_size=N, ddim=use_ddim, ddim_steps=ddim_steps, eta=ddim_eta, ) # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) x_samples = self.decode_first_stage(samples) log["samples"] = x_samples if plot_denoise_rows: denoise_grid = self._get_denoise_row_from_list(z_denoise_row) log["denoise_row"] = denoise_grid if ( quantize_denoised
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0.0, v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1.0, conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0.0, make_it_fit=False, ucg_training=None, ): super().__init__() assert parameterization in [ "eps", "x0", ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode" ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if ckpt_path is not None: self.init_from_ckpt( ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet ) self.register_schedule( given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule( beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) alphas = 1.0 - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1]) (timesteps,) = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert ( alphas_cumprod.shape[0] == self.num_timesteps ), "alphas have to be defined for each timestep" to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer("betas", to_torch(betas)) self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod)) self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod))) self.register_buffer( "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod)) ) self.register_buffer( "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod)) ) self.register_buffer( "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod)) ) self.register_buffer( "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1)) ) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * ( 1.0 - alphas_cumprod_prev ) / (1.0 - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. 
- alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer("posterior_variance", to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer( "posterior_log_variance_clipped", to_torch(np.log(np.maximum(posterior_variance, 1e-20))), ) self.register_buffer( "posterior_mean_coef1", to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)), ) self.register_buffer( "posterior_mean_coef2", to_torch( (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod) ), ) if self.parameterization == "eps": lvlb_weights = self.betas**2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod) ) elif self.parameterization == "x0": lvlb_weights = ( 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2.0 * 1 - torch.Tensor(alphas_cumprod)) ) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer("lvlb_weights", lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) if self.make_it_fit: n_params = len( [ name for name, _ in itertools.chain( self.named_parameters(), self.named_buffers() ) ] ) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params, ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[ i % old_shape[0], j % old_shape[1] ] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = ( self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(sd, strict=False) ) print( f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" ) if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. 
:return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor( self.log_one_minus_alphas_cumprod, t, x_start.shape ) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor( self.posterior_log_variance_clipped, t, x_t.shape ) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1.0, 1.0) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t ) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance( x=x, t=t, clip_denoised=clip_denoised ) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm( reversed(range(0, self.num_timesteps)), desc="Sampling t", total=self.num_timesteps, ): img = self.p_sample( img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised, ) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop( (batch_size, channels, image_size, image_size), return_intermediates=return_intermediates, ) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise ) def get_loss(self, pred, target, mean=True): if self.loss_type == "l1": loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == "l2": if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction="none") else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = 
self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError( f"Paramterization {self.parameterization} not yet supported" ) loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = "train" if self.training else "val" loss_dict.update({f"{log_prefix}/loss_simple": loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f"{log_prefix}/loss_vlb": loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f"{log_prefix}/loss": loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint( 0, self.num_timesteps, (x.shape[0],), device=self.device ).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, "b h w c -> b c h w") x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict( loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True ) self.log( "global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False, ) if self.use_scheduler: lr = self.optimizers().param_groups[0]["lr"] self.log( "lr_abs", lr, prog_bar=True, logger=True, on_step=True, on_epoch=False ) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + "_ema": loss_dict_ema[key] for key in loss_dict_ema} self.log_dict( loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) self.log_dict( loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), "1 -> b", b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = 
self.sample( batch_size=N, return_intermediates=True ) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__( self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, unet_trainable=True, *args, **kwargs, ): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs["timesteps"] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = "concat" if concat_mode else "crossattn" if cond_stage_config == "__is_unconditional__": conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.unet_trainable = unet_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer("scale_factor", torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward # construct linear projection layer for concatenating image CLIP embedding and RT self.cc_projection = nn.Linear(772, 768) nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768]) nn.init.zeros_(list(self.cc_projection.parameters())[1]) self.cc_projection.requires_grad_(True) self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True def make_cond_schedule( self, ): self.cond_ids = torch.full( size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long, ) ids = torch.round( torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond) ).long() self.cond_ids[: self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if ( self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt ): assert ( self.scale_factor == 1.0 ), "rather not use custom rescaling and std-rescaling simultaneously" # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer("scale_factor", 1.0 / z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, 
linear_end=2e-2, cosine_s=8e-3, ): super().register_schedule( given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s ) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != "__is_first_stage__" assert config != "__is_unconditional__" model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list( self, samples, desc="", force_no_decoder_quantization=False ): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append( self.decode_first_stage( zd.to(self.device), force_not_quantize=force_no_decoder_quantization ) ) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError( f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented" ) return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, "encode") and callable( self.cond_stage_model.encode ): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min( torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1 )[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = torch.clip( weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 
1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip( L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"], ) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold( self, x, kernel_size, stride, uf=1, df=1 ): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting( kernel_size[0], kernel_size[1], Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict( kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf), ) fold = torch.nn.Fold( output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2 ) weighting = self.get_weighting( kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view( 1, 1, h * uf, w * uf ) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx) ) elif df > 1 and uf == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict( kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df), ) fold = torch.nn.Fold( output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2 ) weighting = self.get_weighting( kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view( 1, 1, h // df, w // df ) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx) ) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input( self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, uncond=0.05, ): x = super().get_input(batch, k) T = batch["T"].to(memory_format=torch.contiguous_format).float() if bs is not None: x = x[:bs] T = T[:bs].to(self.device) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() cond_key = cond_key or self.cond_stage_key xc = super().get_input(batch, cond_key).to(self.device) if bs is not None: xc = xc[:bs] cond = {} # To support classifier-free guidance, randomly drop out only text conditioning 5%, only image conditioning 5%, and both 5%. 
random = torch.rand(x.size(0), device=x.device) prompt_mask = rearrange(random < 2 * uncond, "n -> n 1 1") input_mask = 1 - rearrange( (random >= uncond).float() * (random < 3 * uncond).float(), "n -> n 1 1 1" ) null_prompt = self.get_learned_conditioning([""]) # z.shape: [8, 4, 64, 64]; c.shape: [8, 1, 768] # print('=========== xc shape ===========', xc.shape) with torch.enable_grad(): clip_emb = self.get_learned_conditioning(xc).detach() null_prompt = self.get_learned_conditioning([""]).detach() cond["c_crossattn"] = [ self.cc_projection( torch.cat( [ torch.where(prompt_mask, null_prompt, clip_emb), T[:, None, :], ], dim=-1, ) ) ] cond["c_concat"] = [ input_mask * self.encode_first_stage((xc.to(self.device))).mode().detach() ] out = [z, cond] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out # @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, "b h w c -> b c h w").contiguous() z = 1.0 / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold( z, ks, stride, uf=uf ) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [ self.first_stage_model.decode( z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize, ) for i in range(z.shape[-1]) ] else: output_list = [ self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1]) ] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode( z, force_not_quantize=predict_cids or force_not_quantize ) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode( z, force_not_quantize=predict_cids or force_not_quantize ) else: return self.first_stage_model.decode(z) # @torch.no_grad() # wasted two hours to find this bug... why no grad here! def encode_first_stage(self, x): if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. 
(64, 64) df = self.split_input_params["vqf"] self.split_input_params["original_image_size"] = x.shape[-2:] bs, nc, h, w = x.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold( x, ks, stride, df=df ) z = unfold(x) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) output_list = [ self.first_stage_model.encode(z[:, :, :, :, i]) for i in range(z.shape[-1]) ] o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization return decoded else: return self.first_stage_model.encode(x) else: return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c) return loss def forward(self, x, c, *args, **kwargs): t = torch.randint( 0, self.num_timesteps, (x.shape[0],), device=self.device ).long() if self.model.conditioning_key is not None: assert c is not None # if self.cond_stage_trainable: # c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset def rescale_bbox(bbox): x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) w = min(bbox[2] / crop_coordinates[2], 1 - x0) h = min(bbox[3] / crop_coordinates[3], 1 - y0) return x0, y0, w, h return [rescale_bbox(b) for b in bboxes] def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is exptected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = ( "c_concat" if self.model.conditioning_key == "concat" else "c_crossattn" ) cond = {key: cond} if hasattr(self, "split_input_params"): assert len(cond) == 1 # todo can only deal with one conditioning atm assert not return_ids ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. 
(64, 64) h, w = x_noisy.shape[-2:] fold, unfold, normalization, weighting = self.get_fold_unfold( x_noisy, ks, stride ) z = unfold(x_noisy) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] if ( self.cond_stage_key in ["image", "LR_image", "segmentation", "bbox_img"] and self.model.conditioning_key ): # todo check for completeness c_key = next(iter(cond.keys())) # get key c = next(iter(cond.values())) # get value assert len(c) == 1 # todo extend to list with more than one elem c = c[0] # get element c = unfold(c) c = c.view( (c.shape[0], -1, ks[0], ks[1], c.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] elif self.cond_stage_key == "coordinates_bbox": assert ( "original_image_size" in self.split_input_params ), "BoudingBoxRescaling is missing original_image_size" # assuming padding of unfold is always 0 and its dilation is always 1 n_patches_per_row = int((w - ks[0]) / stride[0] + 1) full_img_h, full_img_w = self.split_input_params["original_image_size"] # as we are operating on latents, we need the factor from the original image size to the # spatial latent size to properly rescale the crops for regenerating the bbox annotations num_downs = self.first_stage_model.encoder.num_resolutions - 1 rescale_latent = 2 ** (num_downs) # get top left postions of patches as conforming for the bbbox tokenizer, therefore we # need to rescale the tl patch coordinates to be in between (0,1) tl_patch_coordinates = [ ( rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h, ) for patch_nr in range(z.shape[-1]) ] # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) patch_limits = [ ( x_tl, y_tl, rescale_latent * ks[0] / full_img_w, rescale_latent * ks[1] / full_img_h, ) for x_tl, y_tl in tl_patch_coordinates ] # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] # tokenize crop coordinates for the bounding boxes of the respective patches patch_limits_tknzd = [ torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to( self.device ) for bbox in patch_limits ] # list of length l with tensors of shape (1, 2) # cut tknzd crop position from conditioning assert isinstance(cond, dict), "cond must be dict to be fed into model" cut_cond = cond["c_crossattn"][0][..., :-2].to(self.device) adapted_cond = torch.stack( [torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd] ) adapted_cond = rearrange(adapted_cond, "l b n -> (l b) n") adapted_cond = self.get_learned_conditioning(adapted_cond) adapted_cond = rearrange( adapted_cond, "(l b) n d -> l b n d", l=z.shape[-1] ) cond_list = [{"c_crossattn": [e]} for e in adapted_cond] else: cond_list = [ cond for i in range(z.shape[-1]) ] # Todo make this more efficient # apply model by loop over crops output_list = [ self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1]) ] assert not isinstance( output_list[0], tuple ) # todo cant deal with multiple model outputs check this never happens o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together x_recon = fold(o) / normalization else: x_recon = self.model(x_noisy, t, **cond) if 
isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart ) / extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl( mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0 ) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, cond, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output = self.apply_model(x_noisy, t, cond) loss_dict = {} prefix = "train" if self.training else "val" if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise else: raise NotImplementedError() loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) loss_dict.update({f"{prefix}/loss_simple": loss_simple.mean()}) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f"{prefix}/loss_gamma": loss.mean()}) loss_dict.update({"logvar": self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f"{prefix}/loss_vlb": loss_vlb}) loss += self.original_elbo_weight * loss_vlb loss_dict.update({f"{prefix}/loss": loss}) return loss, loss_dict def p_mean_variance( self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None, ): t_in = t model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if score_corrector is not None: assert self.parameterization == "eps" model_out = score_corrector.modify_score( self, model_out, x, t, c, **corrector_kwargs ) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1.0, 1.0) if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t ) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample( self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, ): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance( x=x, 
c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, ) if return_codebook_ids: raise DeprecationWarning("Support dropped.") model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.0: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * ( 0.5 * model_log_variance ).exp() * noise, logits.argmax(dim=1) if return_x0: return ( model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0, ) else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising( self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None, ): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = { key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond } else: cond = ( [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] ) if start_T is not None: timesteps = min(timesteps, start_T) iterator = ( tqdm( reversed(range(0, timesteps)), desc="Progressive Generation", total=timesteps, ) if verbose else reversed(range(0, timesteps)) ) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != "hybrid" tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample( img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, ) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1.0 - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop( self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None, ): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator 
= ( tqdm(reversed(range(0, timesteps)), desc="Sampling t", total=timesteps) if verbose else reversed(range(0, timesteps)) ) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != "hybrid" tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample( img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, ) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1.0 - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample( self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs, ): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = { key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond } else: cond = ( [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] ) return self.p_sample_loop( cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0, ) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample( ddim_steps, batch_size, shape, cond, verbose=False, **kwargs ) else: samples, intermediates = self.sample( cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs ) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning( self, batch_size, null_label=None, image_size=512 ): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: # todo: get null label from cond_stage_model raise NotImplementedError() c = repeat(c, "1 ... 
-> b ...", b=batch_size).to(self.device) cond = {} cond["c_crossattn"] = [c] cond["c_concat"] = [ torch.zeros([batch_size, 4, image_size // 8, image_size // 8]).to( self.device ) ] return cond @torch.no_grad() def log_images( self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1.0, return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1.0, unconditional_guidance_label=None, use_ema_scope=True, **kwargs, ): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input( batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N, ) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img( (x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25, ) log["conditioning"] = xc elif self.cond_stage_key == "class_label": xc = log_txt_as_img( (x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25, ) log["conditioning"] = xc elif isimage(xc): log["conditioning"] = xc if ismap(xc): log["original_conditioning"] = self.to_rgb(xc) if plot_diffusion_rows: # get diffusion row diffusion_row = list() z_start = z[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), "1 -> b", b=n_row) t = t.to(self.device).long() noise = torch.randn_like(z_start) z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) diffusion_row.append(self.decode_first_stage(z_noisy)) diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W diffusion_grid = rearrange(diffusion_row, "n b c h w -> b n c h w") diffusion_grid = rearrange(diffusion_grid, "b n c h w -> (b n) c h w") diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) log["diffusion_row"] = diffusion_grid if sample: # get denoise row with ema_scope("Sampling"): samples, z_denoise_row = self.sample_log( cond=c, batch_size=N, ddim=use_ddim, ddim_steps=ddim_steps, eta=ddim_eta, ) # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) x_samples = self.decode_first_stage(samples) log["samples"] = x_samples if plot_denoise_rows: denoise_grid = self._get_denoise_row_from_list(z_denoise_row) log["denoise_row"] = denoise_grid if ( quantize_denoised
and not isinstance(self.first_stage_model, AutoencoderKL)
0
2023-12-01 01:59:42+00:00
16k
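The `get_input` in the record above implements Zero-1-to-3-style classifier-free guidance dropout: one uniform draw per sample decides whether to null the text embedding, zero the image latent, or both. A self-contained sketch of that masking scheme follows; the tensor shapes and the driver call are illustrative, and `uncond=0.05` matches the record's own default.

import torch
from einops import rearrange

def drop_conditioning(clip_emb, null_prompt, image_latents, uncond=0.05):
    # random in [0, uncond)          -> drop text only
    # random in [uncond, 2*uncond)   -> drop both (the two masks overlap here)
    # random in [2*uncond, 3*uncond) -> drop image only
    random = torch.rand(clip_emb.size(0), device=clip_emb.device)
    prompt_mask = rearrange(random < 2 * uncond, "n -> n 1 1")
    input_mask = 1 - rearrange(
        (random >= uncond).float() * (random < 3 * uncond).float(), "n -> n 1 1 1"
    )
    text_cond = torch.where(prompt_mask, null_prompt, clip_emb)  # null where masked
    return text_cond, input_mask * image_latents                 # zero where masked

text_cond, image_cond = drop_conditioning(
    torch.randn(8, 1, 768), torch.zeros(1, 1, 768), torch.randn(8, 4, 32, 32)
)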
lucidrains/meshgpt-pytorch
meshgpt_pytorch/trainer.py
[ { "identifier": "custom_collate", "path": "meshgpt_pytorch/data.py", "snippet": "def custom_collate(data, pad_id = -1):\n is_dict = isinstance(first(data), dict)\n\n if is_dict:\n keys = first(data).keys()\n data = [d.values() for d in data]\n\n output = []\n\n for datum in zip...
from pathlib import Path from functools import partial from packaging import version from contextlib import nullcontext, contextmanager from torch import nn, Tensor from torch.nn import Module from torch.utils.data import Dataset, DataLoader from torch.optim.lr_scheduler import _LRScheduler from pytorch_custom_utils import ( get_adam_optimizer, OptimizerWithWarmupSchedule, add_wandb_tracker_contextmanager ) from accelerate import Accelerator from accelerate.utils import DistributedDataParallelKwargs from beartype import beartype from beartype.door import is_bearable from beartype.typing import Optional, Tuple, Type, List from ema_pytorch import EMA from meshgpt_pytorch.data import custom_collate from meshgpt_pytorch.version import __version__ from meshgpt_pytorch.meshgpt_pytorch import ( MeshAutoencoder, MeshTransformer ) import torch import torch.nn.functional as F
12,244
data_kwargs: Tuple[str, ...] = ['vertices', 'faces', 'face_edges'], warmup_steps = 1000, use_wandb_tracking = False ): super().__init__() # experiment tracker self.use_wandb_tracking = use_wandb_tracking if use_wandb_tracking: accelerator_kwargs['log_with'] = 'wandb' if 'kwargs_handlers' not in accelerator_kwargs: accelerator_kwargs['kwargs_handlers'] = [DEFAULT_DDP_KWARGS] # accelerator self.accelerator = Accelerator(**accelerator_kwargs) self.model = model if self.is_main: self.ema_model = EMA(model, **ema_kwargs) self.optimizer = OptimizerWithWarmupSchedule( accelerator = self.accelerator, optimizer = get_adam_optimizer(model.parameters(), lr = learning_rate, wd = weight_decay, **optimizer_kwargs), scheduler = scheduler, scheduler_kwargs = scheduler_kwargs, warmup_steps = warmup_steps, max_grad_norm = max_grad_norm ) self.dataloader = DataLoader( dataset, batch_size = batch_size, shuffle = True, drop_last = True, collate_fn = partial(custom_collate, pad_id = model.pad_id) ) self.should_validate = exists(val_dataset) if self.should_validate: assert len(val_dataset) > 0, 'your validation dataset is empty' self.val_every = val_every self.val_num_batches = val_num_batches self.val_dataloader = DataLoader( val_dataset, batch_size = batch_size, shuffle = True, drop_last = True, collate_fn = partial(custom_collate, pad_id = model.pad_id) ) if hasattr(dataset, 'data_kwargs') and exists(dataset.data_kwargs): assert is_bearable(dataset.data_kwargs, List[str]) self.data_kwargs = dataset.data_kwargs else: self.data_kwargs = data_kwargs ( self.model, self.dataloader ) = self.accelerator.prepare( self.model, self.dataloader ) self.grad_accum_every = grad_accum_every self.num_train_steps = num_train_steps self.register_buffer('step', torch.tensor(0)) self.checkpoint_every = checkpoint_every self.checkpoint_folder = Path(checkpoint_folder) self.checkpoint_folder.mkdir(exist_ok = True, parents = True) @property def ema_tokenizer(self): return self.ema_model.ema_model def tokenize(self, *args, **kwargs): return self.ema_tokenizer.tokenize(*args, **kwargs) def log(self, **data_kwargs): self.accelerator.log(data_kwargs, step = self.step.item()) @property def device(self): return self.unwrapped_model.device @property def is_main(self): return self.accelerator.is_main_process @property def unwrapped_model(self): return self.accelerator.unwrap_model(self.model) @property def is_local_main(self): return self.accelerator.is_local_main_process def wait(self): return self.accelerator.wait_for_everyone() def print(self, msg): return self.accelerator.print(msg) def save(self, path, overwrite = True): path = Path(path) assert overwrite or not path.exists() pkg = dict( model = self.unwrapped_model.state_dict(), ema_model = self.ema_model.state_dict(), optimizer = self.optimizer.state_dict(),
# constants DEFAULT_DDP_KWARGS = DistributedDataParallelKwargs( find_unused_parameters = True ) # helper functions def exists(v): return v is not None def default(v, d): return v if exists(v) else d def divisible_by(num, den): return (num % den) == 0 def cycle(dl): while True: for data in dl: yield data def maybe_del(d: dict, *keys): for key in keys: if key not in d: continue del d[key] # autoencoder trainer @add_wandb_tracker_contextmanager() class MeshAutoencoderTrainer(Module): @beartype def __init__( self, model: MeshAutoencoder, dataset: Dataset, num_train_steps: int, batch_size: int, grad_accum_every: int, val_dataset: Optional[Dataset] = None, val_every: int = 100, val_num_batches: int = 5, learning_rate: float = 1e-4, weight_decay: float = 0., max_grad_norm: Optional[float] = None, ema_kwargs: dict = dict(), scheduler: Optional[Type[_LRScheduler]] = None, scheduler_kwargs: dict = dict(), accelerator_kwargs: dict = dict(), optimizer_kwargs: dict = dict(), checkpoint_every = 1000, checkpoint_folder = './checkpoints', data_kwargs: Tuple[str, ...] = ['vertices', 'faces', 'face_edges'], warmup_steps = 1000, use_wandb_tracking = False ): super().__init__() # experiment tracker self.use_wandb_tracking = use_wandb_tracking if use_wandb_tracking: accelerator_kwargs['log_with'] = 'wandb' if 'kwargs_handlers' not in accelerator_kwargs: accelerator_kwargs['kwargs_handlers'] = [DEFAULT_DDP_KWARGS] # accelerator self.accelerator = Accelerator(**accelerator_kwargs) self.model = model if self.is_main: self.ema_model = EMA(model, **ema_kwargs) self.optimizer = OptimizerWithWarmupSchedule( accelerator = self.accelerator, optimizer = get_adam_optimizer(model.parameters(), lr = learning_rate, wd = weight_decay, **optimizer_kwargs), scheduler = scheduler, scheduler_kwargs = scheduler_kwargs, warmup_steps = warmup_steps, max_grad_norm = max_grad_norm ) self.dataloader = DataLoader( dataset, batch_size = batch_size, shuffle = True, drop_last = True, collate_fn = partial(custom_collate, pad_id = model.pad_id) ) self.should_validate = exists(val_dataset) if self.should_validate: assert len(val_dataset) > 0, 'your validation dataset is empty' self.val_every = val_every self.val_num_batches = val_num_batches self.val_dataloader = DataLoader( val_dataset, batch_size = batch_size, shuffle = True, drop_last = True, collate_fn = partial(custom_collate, pad_id = model.pad_id) ) if hasattr(dataset, 'data_kwargs') and exists(dataset.data_kwargs): assert is_bearable(dataset.data_kwargs, List[str]) self.data_kwargs = dataset.data_kwargs else: self.data_kwargs = data_kwargs ( self.model, self.dataloader ) = self.accelerator.prepare( self.model, self.dataloader ) self.grad_accum_every = grad_accum_every self.num_train_steps = num_train_steps self.register_buffer('step', torch.tensor(0)) self.checkpoint_every = checkpoint_every self.checkpoint_folder = Path(checkpoint_folder) self.checkpoint_folder.mkdir(exist_ok = True, parents = True) @property def ema_tokenizer(self): return self.ema_model.ema_model def tokenize(self, *args, **kwargs): return self.ema_tokenizer.tokenize(*args, **kwargs) def log(self, **data_kwargs): self.accelerator.log(data_kwargs, step = self.step.item()) @property def device(self): return self.unwrapped_model.device @property def is_main(self): return self.accelerator.is_main_process @property def unwrapped_model(self): return self.accelerator.unwrap_model(self.model) @property def is_local_main(self): return self.accelerator.is_local_main_process def wait(self): return 
self.accelerator.wait_for_everyone() def print(self, msg): return self.accelerator.print(msg) def save(self, path, overwrite = True): path = Path(path) assert overwrite or not path.exists() pkg = dict( model = self.unwrapped_model.state_dict(), ema_model = self.ema_model.state_dict(), optimizer = self.optimizer.state_dict(),
version = __version__,
1
2023-11-29 14:58:15+00:00
16k
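The trainer above keeps an EMA copy of the autoencoder (via `ema_pytorch`) on the main process, checkpoints it alongside the raw weights, and tokenizes with the smoothed copy. A minimal sketch of that pattern outside Accelerate; the tiny model and the `beta`/`update_every` hyperparameters here are placeholder choices, not values from the record.

import torch
from torch import nn
from ema_pytorch import EMA

model = nn.Linear(16, 16)
ema = EMA(model, beta=0.995, update_every=10)   # hypothetical hyperparameters
opt = torch.optim.Adam(model.parameters(), lr=1e-4)

for _ in range(100):
    loss = model(torch.randn(4, 16)).pow(2).mean()
    loss.backward()
    opt.step()
    opt.zero_grad()
    ema.update()          # shadow weights track the online model

smoothed = ema.ema_model  # evaluate / tokenize with the smoothed copy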
EricGuo5513/momask-codes
train_res_transformer.py
[ { "identifier": "ResidualTransformer", "path": "models/mask_transformer/transformer.py", "snippet": "class ResidualTransformer(nn.Module):\n def __init__(self, code_dim, cond_mode, latent_dim=256, ff_size=1024, num_layers=8, cond_drop_prob=0.1,\n num_heads=4, dropout=0.1, clip_dim=512...
import os import torch import numpy as np from torch.utils.data import DataLoader from os.path import join as pjoin from models.mask_transformer.transformer import ResidualTransformer from models.mask_transformer.transformer_trainer import ResidualTransformerTrainer from models.vq.model import RVQVAE from options.train_option import TrainT2MOptions from utils.plot_script import plot_3d_motion from utils.motion_process import recover_from_ric from utils.get_opt import get_opt from utils.fixseed import fixseed from utils.paramUtil import t2m_kinematic_chain, kit_kinematic_chain from data.t2m_dataset import Text2MotionDataset from motion_loaders.dataset_motion_loader import get_dataset_motion_loader from models.t2m_eval_wrapper import EvaluatorModelWrapper
12,538
def plot_t2m(data, save_dir, captions, m_lengths):
    data = train_dataset.inv_transform(data)
    # print(ep_curves.shape)
    for i, (caption, joint_data) in enumerate(zip(captions, data)):
        joint_data = joint_data[:m_lengths[i]]
def plot_t2m(data, save_dir, captions, m_lengths):
    data = train_dataset.inv_transform(data)
    # print(ep_curves.shape)
    for i, (caption, joint_data) in enumerate(zip(captions, data)):
        joint_data = joint_data[:m_lengths[i]]
joint = recover_from_ric(torch.from_numpy(joint_data).float(), opt.joints_num).numpy()
5
2023-11-29 19:21:27+00:00
16k
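`plot_t2m` above denormalizes motion features with `train_dataset.inv_transform` before recovering joint positions. In this family of codebases that is typically a z-score inverse; the stand-in below makes that assumption explicit rather than reading it from the record.

import numpy as np

class MotionNormalizer:
    # stand-in for the dataset's (assumed) per-feature z-score normalization
    def __init__(self, mean: np.ndarray, std: np.ndarray):
        self.mean, self.std = mean, std

    def transform(self, data):
        return (data - self.mean) / self.std

    def inv_transform(self, data):
        return data * self.std + self.mean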
dvlab-research/LLMGA
llmga/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_pix2pix_zero.py
[ { "identifier": "TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS", "path": "llmga/diffusers/tests/pipelines/pipeline_params.py", "snippet": "TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset([\"prompt\", \"image\", \"negative_prompt\"])" }, { "identifier": "TEXT_GUIDED_IMAGE_VARIATION_PARAMS", "...
import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DDPMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, StableDiffusionPix2PixZeroPipeline, UNet2DConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, load_pt, nightly, require_torch_gpu, skip_mps, torch_device, ) from ..pipeline_params import ( TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, ) from ..test_pipelines_common import ( PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, )
11,361
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_zero_inversion_batch(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inversion_inputs(device) image = sd_pipe.invert(**inputs).images image_slice = image[1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) expected_slice = np.array([0.6046, 0.5400, 0.4902, 0.4448, 0.4694, 0.5498, 0.4857, 0.5073, 0.5089]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_zero_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4863, 0.5053, 0.5033, 0.4007, 0.3571, 0.4768, 0.5176, 0.5277, 0.4940]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_zero_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "french fries" output = sd_pipe(**inputs, negative_prompt=negative_prompt) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5177, 0.5097, 0.5047, 0.4076, 0.3667, 0.4767, 0.5238, 0.5307, 0.4958]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_zero_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = EulerAncestralDiscreteScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear" ) sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5421, 0.5525, 0.6085, 0.5279, 0.4658, 0.5317, 0.4418, 0.4815, 0.5132]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_zero_ddpm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = DDPMScheduler() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4861, 0.5053, 0.5038, 0.3994, 0.3562, 0.4768, 0.5172, 0.5280, 0.4938]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_zero_inversion_pt_np_pil_outputs_equivalent(self): device = torch_device 
components = self.get_dummy_components() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) output_pt = sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, output_type="pt")).images output_np = sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, output_type="np")).images output_pil = sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, output_type="pil")).images max_diff = np.abs(output_pt.cpu().numpy().transpose(0, 2, 3, 1) - output_np).max() self.assertLess(max_diff, 1e-4, "`output_type=='pt'` generate different results from `output_type=='np'`") max_diff = np.abs(np.array(output_pil[0]) - (output_np[0] * 255).round()).max() self.assertLess(max_diff, 2.0, "`output_type=='pil'` generate different results from `output_type=='np'`") def test_stable_diffusion_pix2pix_zero_inversion_pt_np_pil_inputs_equivalent(self): device = torch_device components = self.get_dummy_components() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) out_input_pt = sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, input_image_type="pt")).images out_input_np = sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, input_image_type="np")).images out_input_pil = sd_pipe.invert( **self.get_dummy_inversion_inputs_by_type(device, input_image_type="pil") ).images max_diff = np.abs(out_input_pt - out_input_np).max() self.assertLess(max_diff, 1e-4, "`input_type=='pt'` generate different result from `input_type=='np'`")
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. enable_full_determinism() @skip_mps class StableDiffusionPix2PixZeroPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase): pipeline_class = StableDiffusionPix2PixZeroPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"image"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def setUpClass(cls): cls.source_embeds = load_pt( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/src_emb_0.pt" ) cls.target_embeds = load_pt( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/tgt_emb_0.pt" ) def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = DDIMScheduler() inverse_scheduler = DDIMInverseScheduler() torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "inverse_scheduler": inverse_scheduler, "caption_generator": None, "caption_processor": None, } return components def get_dummy_inputs(self, device, seed=0): generator = torch.manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "cross_attention_guidance_amount": 0.15, "source_embeds": self.source_embeds, "target_embeds": self.target_embeds, "output_type": "numpy", } return inputs def get_dummy_inversion_inputs(self, device, seed=0): dummy_image = floats_tensor((2, 3, 32, 32), rng=random.Random(seed)).to(torch_device) dummy_image = dummy_image / 2 + 0.5 generator = torch.manual_seed(seed) inputs = { "prompt": [ "A painting of a squirrel eating a burger", "A painting of a burger eating a squirrel", ], "image": dummy_image.cpu(), "num_inference_steps": 2, "guidance_scale": 6.0, "generator": generator, "output_type": "numpy", } return inputs def get_dummy_inversion_inputs_by_type(self, device, seed=0, input_image_type="pt", output_type="np"): inputs = 
self.get_dummy_inversion_inputs(device, seed) if input_image_type == "pt": image = inputs["image"] elif input_image_type == "np": image = VaeImageProcessor.pt_to_numpy(inputs["image"]) elif input_image_type == "pil": image = VaeImageProcessor.pt_to_numpy(inputs["image"]) image = VaeImageProcessor.numpy_to_pil(image) else: raise ValueError(f"unsupported input_image_type {input_image_type}") inputs["image"] = image inputs["output_type"] = output_type return inputs def test_save_load_optional_components(self): if not hasattr(self.pipeline_class, "_optional_components"): return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(pipe, optional_component, None) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components}) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for optional_component in pipe._optional_components: self.assertTrue( getattr(pipe_loaded, optional_component) is None, f"`{optional_component}` did not stay set to None after loading.", ) inputs = self.get_dummy_inputs(torch_device) output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(output - output_loaded).max() self.assertLess(max_diff, 1e-4) def test_stable_diffusion_pix2pix_zero_inversion(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inversion_inputs(device) inputs["image"] = inputs["image"][:1] inputs["prompt"] = inputs["prompt"][:1] image = sd_pipe.invert(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.4732, 0.4630, 0.5722, 0.5103, 0.5140, 0.5622, 0.5104, 0.5390, 0.5020]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_zero_inversion_batch(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inversion_inputs(device) image = sd_pipe.invert(**inputs).images image_slice = image[1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) expected_slice = np.array([0.6046, 0.5400, 0.4902, 0.4448, 0.4694, 0.5498, 0.4857, 0.5073, 0.5089]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_zero_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4863, 0.5053, 0.5033, 0.4007, 0.3571, 0.4768, 0.5176, 0.5277, 
0.4940]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_zero_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "french fries" output = sd_pipe(**inputs, negative_prompt=negative_prompt) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5177, 0.5097, 0.5047, 0.4076, 0.3667, 0.4767, 0.5238, 0.5307, 0.4958]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_zero_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = EulerAncestralDiscreteScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear" ) sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5421, 0.5525, 0.6085, 0.5279, 0.4658, 0.5317, 0.4418, 0.4815, 0.5132]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_zero_ddpm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = DDPMScheduler() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4861, 0.5053, 0.5038, 0.3994, 0.3562, 0.4768, 0.5172, 0.5280, 0.4938]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_zero_inversion_pt_np_pil_outputs_equivalent(self): device = torch_device components = self.get_dummy_components() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) output_pt = sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, output_type="pt")).images output_np = sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, output_type="np")).images output_pil = sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, output_type="pil")).images max_diff = np.abs(output_pt.cpu().numpy().transpose(0, 2, 3, 1) - output_np).max() self.assertLess(max_diff, 1e-4, "`output_type=='pt'` generate different results from `output_type=='np'`") max_diff = np.abs(np.array(output_pil[0]) - (output_np[0] * 255).round()).max() self.assertLess(max_diff, 2.0, "`output_type=='pil'` generate different results from `output_type=='np'`") def test_stable_diffusion_pix2pix_zero_inversion_pt_np_pil_inputs_equivalent(self): device = torch_device components = self.get_dummy_components() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) out_input_pt = sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, input_image_type="pt")).images 
out_input_np = sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, input_image_type="np")).images out_input_pil = sd_pipe.invert( **self.get_dummy_inversion_inputs_by_type(device, input_image_type="pil") ).images max_diff = np.abs(out_input_pt - out_input_np).max() self.assertLess(max_diff, 1e-4, "`input_type=='pt'` generate different result from `input_type=='np'`")
assert_mean_pixel_difference(out_input_pil, out_input_np, expected_max_diff=1)
5
2023-11-27 18:46:55+00:00
16k
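Nearly every assertion in the test file above follows one idiom: slice a fixed 3x3 corner of the last channel of the output image and compare it against frozen reference values. A small helper capturing that idiom; the `assert_slice_close` name is ours, and the default tolerance mirrors the 1e-3 used throughout the record.

import numpy as np

def assert_slice_close(image, expected_slice, atol=1e-3, batch_index=0):
    # take the bottom-right 3x3 patch of the last channel from one batch element
    image_slice = image[batch_index, -3:, -3:, -1]
    max_diff = np.abs(image_slice.flatten() - np.asarray(expected_slice)).max()
    assert max_diff < atol, f"max pixel diff {max_diff} exceeds {atol}"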
sherwinbahmani/4dfy
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Flo...
from dataclasses import dataclass, field from threestudio.models.geometry.base import ( BaseExplicitGeometry, BaseGeometry, contract_to_unisphere, ) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.ops import scale_tensor from threestudio.utils.typing import * import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import threestudio
12,966
isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: raise NotImplementedError def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: raise NotImplementedError def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale
points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1)
2
2023-11-29 05:15:56+00:00
16k
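`TetrahedraSDFGrid.configure` above switches the SDF and deformation grids between trainable parameters and frozen buffers depending on `cfg.fix_geometry`. The core of that choice in isolation, as a minimal sketch:

import torch
from torch import nn

class SDFGrid(nn.Module):
    def __init__(self, n_vertices: int, fix_geometry: bool = False):
        super().__init__()
        sdf = torch.zeros(n_vertices, 1)
        if fix_geometry:
            # stored in state_dict and moved with .to(device), but never optimized
            self.register_buffer("sdf", sdf)
        else:
            # exposed through .parameters(), so the optimizer updates it
            self.register_parameter("sdf", nn.Parameter(sdf))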
rlawjdghek/StableVITON
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n ...
import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import itertools import torchvision.transforms as T import random import torch.nn.functional as F from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager, nullcontext from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.distributed import rank_zero_only from omegaconf import ListConfig from torchvision.transforms.functional import resize from diffusers.models.autoencoder_kl import AutoencoderKLOutput from diffusers.models.vae import DecoderOutput from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ldm.modules.ema import LitEma from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like, zero_module, conv_nd from ldm.models.diffusion.ddim import DDIMSampler
12,922
else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning(self, batch_size, null_label=None): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: if self.cond_stage_key in ["class_label", "cls"]: xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device) return self.get_learned_conditioning(xc) else: raise NotImplementedError("todo") if isinstance(c, list): # in case the encoder gives us a list for i in range(len(c)): c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device) else: c = repeat(c, '1 ... 
-> b ...', b=batch_size).to(self.device) return c @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True, **kwargs): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]:
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, l_cond_simple_weight=1.0, l_cond_recon_weight=1.0, **kwargs ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.unet_config = unet_config self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.imagenet_norm = T.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)) self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight self.l_cond_simple_weight = l_cond_simple_weight self.l_cond_recon_weight = l_cond_recon_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if reset_ema: assert exists(ckpt_path) if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) if reset_ema: assert self.use_ema print(f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) else: self.register_buffer('logvar', logvar) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
""" mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_v(self, x, noise, t): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x ) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}_loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}_loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}_loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: 
x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): self.batch = batch for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, force_null_conditioning=False, *args, **kwargs): self.kwargs = kwargs self.force_null_conditioning = force_null_conditioning self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std self.cond_stage_trainable = cond_stage_trainable assert self.num_timesteps_cond <= 
kwargs['timesteps'] if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning: conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) reset_ema = kwargs.pop("reset_ema", False) reset_num_ema_updates = kwargs.pop("reset_num_ema_updates", False) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None if self.kwargs["use_imageCLIP"]: self.proj_out = nn.Linear(1024, 768) else: self.proj_out = None if self.use_pbe_weight: print("learnable vector gene") self.learnable_vector = nn.Parameter(torch.randn((1,1,768)), requires_grad=True) else: self.learnable_vector = None if self.kwargs["use_lastzc"]: # deprecated self.lastzc = zero_module(conv_nd(2, 4, 4, 1, 1, 0)) else: self.lastzc = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True if reset_ema: assert self.use_ema print( f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer('scale_factor', 1. 
/ z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None else: model = instantiate_from_config(config) self.cond_stage_model = model else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append(self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior elif isinstance(encoder_posterior, AutoencoderKLOutput): z = encoder_posterior.latent_dist.sample() else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = 
torch.clip(weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, return_x=False, no_latent=False, is_controlnet=False): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) if no_latent: _,_,h,w = x.shape x = resize(x, (h//8, w//8)) return [x, None] encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if is_controlnet and self.lastzc is not None: z = self.lastzc(z) if self.model.conditioning_key is not None and not self.force_null_conditioning: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox', "txt"]: xc = batch[cond_key] elif cond_key in ['class_label', 'cls']: xc = batch else: xc 
= super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: if self.kwargs["use_imageCLIP"]: xc = resize(xc, (224,224)) xc = self.imagenet_norm((xc+1)/2) c = xc else: if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) c = c.float() else: if self.kwargs["use_imageCLIP"]: xc = resize(xc, (224,224)) xc = self.imagenet_norm((xc+1)/2) c = xc if bs is not None: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_x: out.extend([x]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z output = self.first_stage_model.decode(z) if not isinstance(output, DecoderOutput): return output else: return output.sample def decode_first_stage_train(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c) return loss def forward(self, x, c, *args, **kwargs): if not self.use_pbe_weight: t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() if self.model.conditioning_key is not None: assert c is not None if self.cond_stage_trainable: c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) # pbe negative condition else: t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() self.u_cond_prop=random.uniform(0, 1) c["c_crossattn"] = [self.get_learned_conditioning(c["c_crossattn"])] if self.u_cond_prop < self.u_cond_percent: c["c_crossattn"] = [self.learnable_vector.repeat(x.shape[0],1,1)] return self.p_losses(x, c, t, *args, **kwargs) def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is expected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} x_recon = self.model(x_noisy, t, **cond) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, 
x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, cond, t, noise=None): loss_dict = {} noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output, cond_output_dict = self.apply_model(x_noisy, t, cond) prefix = 'train' if self.training else 'val' if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError() model_loss = None if isinstance(model_output, tuple): model_output, model_loss = model_output if self.only_agn_simple_loss: _, _, l_h, l_w = model_output.shape m_agn = F.interpolate(super().get_input(self.batch, "agn_mask"), (l_h, l_w)) loss_simple = self.get_loss(model_output * (1-m_agn), target * (1-m_agn), mean=False).mean([1, 2, 3]) else: loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) loss_dict.update({f'simple': loss_simple.mean()}) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f'gamma': loss.mean()}) loss_dict.update({'logvar': self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() if self.original_elbo_weight != 0: loss_dict.update({f'loss_vlb': loss_vlb}) loss += (self.original_elbo_weight * loss_vlb) if model_loss is not None: loss += model_loss loss_dict.update({f"model loss" : model_loss}) loss_dict.update({f'{prefix}_loss': loss}) return loss, loss_dict def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None): t_in = t model_out, cond_output_dict = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if isinstance(model_out, tuple): model_out, _ = model_out if score_corrector is not None: assert self.parameterization == "eps" model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1., 1.) 
if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if return_codebook_ids: raise DeprecationWarning("Support dropped.") model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) if return_x0: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', total=timesteps) if verbose else reversed( range(0, timesteps)) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. 
- mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning(self, batch_size, null_label=None): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: if self.cond_stage_key in ["class_label", "cls"]: xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device) return self.get_learned_conditioning(xc) else: raise NotImplementedError("todo") if isinstance(c, list): # in case the encoder gives us a list for i in range(len(c)): c[i] = repeat(c[i], '1 ... 
-> b ...', b=batch_size).to(self.device) else: c = repeat(c, '1 ... -> b ...', b=batch_size).to(self.device) return c @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True, **kwargs): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]:
xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)
0
2023-12-02 05:56:58+00:00
16k
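The DDPM/LatentDiffusion source in the row above repeatedly applies the closed-form forward process q(x_t | x_0) through its precomputed sqrt_alphas_cumprod buffers (see q_sample and register_schedule). A minimal, self-contained sketch of that single step, assuming a linear beta schedule as in make_beta_schedule; every name below is illustrative rather than the row's actual buffers:

import torch

# One closed-form forward-diffusion step:
#   x_t = sqrt(acp_t) * x_0 + sqrt(1 - acp_t) * eps
T = 1000
betas = torch.linspace(1e-4, 2e-2, T)            # linear schedule (assumption)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

def q_sample(x_start, t, noise=None):
    noise = torch.randn_like(x_start) if noise is None else noise
    # broadcast per-sample scalars over image dims, like extract_into_tensor
    acp = alphas_cumprod[t].view(-1, *([1] * (x_start.dim() - 1)))
    return acp.sqrt() * x_start + (1.0 - acp).sqrt() * noise

x0 = torch.randn(4, 3, 8, 8)                     # toy batch
xt = q_sample(x0, torch.randint(0, T, (4,)))     # noised samples at random timesteps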
AIFSH/NativeSpeaker
src/core.py
[ { "identifier": "HandleLog", "path": "src/log_helper.py", "snippet": "class HandleLog:\n \"\"\"\n 先创建日志记录器(logging.getLogger),然后再设置日志级别(logger.setLevel),\n 接着再创建日志文件,也就是日志保存的地方(logging.FileHandler),然后再设置日志格式(logging.Formatter),\n 最后再将日志处理程序记录到记录器(addHandler)\n \"\"\"\n\n def __init__(s...
import os import torch import soundfile as sf import gc; gc.collect(); torch.cuda.empty_cache(); del cloner import gc; gc.collect(); torch.cuda.empty_cache(); del diarize_model import gc; gc.collect(); torch.cuda.empty_cache(); del whisper from typing import Any from tqdm import tqdm from src.log_helper import HandleLog from moviepy.editor import VideoFileClip,concatenate_videoclips from pathlib import Path from pydub import AudioSegment from src.audio_bgm_split import AudioProcess from src.voice_clone import VoiceCloner from src.temp_manager import TempFileManager from src.translator import Translator from src.lipsync import LipSync from src.upscale import Upscale from src.nfsw import analyse_video from src.third_part.whisperx import load_model,load_audio,DiarizationPipeline
11,034
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'
logger = HandleLog()

class Core:
    def __init__(self, args) -> None:
        cur_path = os.path.dirname(os.path.realpath(__file__))  # current path
        self.weights_path = os.path.join(os.path.dirname(cur_path), 'weights')  # weights_path to save model
        if not os.path.exists(self.weights_path):
            os.mkdir(self.weights_path)
        #
        self.input_file = args.input_file_path
        self.output_file = args.output_file_path
        self.lang_code = args.lang_code
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.hf_token = args.hf_token
        self.temp_manager = TempFileManager()
        self.translotor = Translator()
        self.model_name = args.model_name
        self.xt_version_name = args.xt_version_name
        if analyse_video(args.input_file_path):
            raise("sorry! nativespeaker is not for you")

    def __call__(self, *args: Any, **kwds: Any) -> Any:
        logger.critical("[Step 1] Moviepy split voice and frames from video")
        org_voice_path = os.path.join(Path(self.input_file).parent, "org_voice.wav")
        org_video_clip = VideoFileClip(self.input_file)
        org_video_clip.audio.write_audiofile(org_voice_path, codec='pcm_s16le')
        logger.info("save original voice in {}".format(org_voice_path))

        logger.critical("[Step 2] H5 Split vocal and bgm from voice")
        audio_process = AudioProcess(15)
        vocal_file, bgm_file = audio_process.split(org_voice_path)

        logger.critical("[Step 3] whisperx from speech to text")
        whispher_segments, src_lang_code, speakers_wav = self.speech_to_text(vocal_file)

        logger.critical("[Step 4] translate,text to speech,video and voice_cloned aligment")
        vocal_cloned_audio = AudioSegment.silent(0)
        bgm_audio_extend = AudioSegment.silent(0)
        video_extend_list = []
        org_vocal = AudioSegment.from_file(vocal_file)
        bgm_audio = AudioSegment.from_file(bgm_file)
        seg_len = len(whispher_segments)
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'
logger = HandleLog()

class Core:
    def __init__(self, args) -> None:
        cur_path = os.path.dirname(os.path.realpath(__file__))  # current path
        self.weights_path = os.path.join(os.path.dirname(cur_path), 'weights')  # weights_path to save model
        if not os.path.exists(self.weights_path):
            os.mkdir(self.weights_path)
        #
        self.input_file = args.input_file_path
        self.output_file = args.output_file_path
        self.lang_code = args.lang_code
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.hf_token = args.hf_token
        self.temp_manager = TempFileManager()
        self.translotor = Translator()
        self.model_name = args.model_name
        self.xt_version_name = args.xt_version_name
        if analyse_video(args.input_file_path):
            raise("sorry! nativespeaker is not for you")

    def __call__(self, *args: Any, **kwds: Any) -> Any:
        logger.critical("[Step 1] Moviepy split voice and frames from video")
        org_voice_path = os.path.join(Path(self.input_file).parent, "org_voice.wav")
        org_video_clip = VideoFileClip(self.input_file)
        org_video_clip.audio.write_audiofile(org_voice_path, codec='pcm_s16le')
        logger.info("save original voice in {}".format(org_voice_path))

        logger.critical("[Step 2] H5 Split vocal and bgm from voice")
        audio_process = AudioProcess(15)
        vocal_file, bgm_file = audio_process.split(org_voice_path)

        logger.critical("[Step 3] whisperx from speech to text")
        whispher_segments, src_lang_code, speakers_wav = self.speech_to_text(vocal_file)

        logger.critical("[Step 4] translate,text to speech,video and voice_cloned aligment")
        vocal_cloned_audio = AudioSegment.silent(0)
        bgm_audio_extend = AudioSegment.silent(0)
        video_extend_list = []
        org_vocal = AudioSegment.from_file(vocal_file)
        bgm_audio = AudioSegment.from_file(bgm_file)
        seg_len = len(whispher_segments)
cloner = VoiceCloner(self.xt_version_name)
2
2023-12-01 12:23:19+00:00
16k
skhu101/GauHuman
scene/dataset_readers.py
[ { "identifier": "read_extrinsics_text", "path": "scene/colmap_loader.py", "snippet": "def read_extrinsics_text(path):\n \"\"\"\n Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py\n \"\"\"\n images = {}\n with open(path, \"r\") as fid:\n while T...
import os import sys import numpy as np import torch import json import imageio import cv2 import random from PIL import Image from typing import NamedTuple from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \ read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal from pathlib import Path from plyfile import PlyData, PlyElement from utils.sh_utils import SH2RGB from scene.gaussian_model import BasicPointCloud from smpl.smpl_numpy import SMPL from smplx.body_models import SMPLX from data.dna_rendering.dna_rendering_sample_code.SMCReader import SMCReader
14,191
#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact george.drettakis@inria.fr
#

class CameraInfo(NamedTuple):
    uid: int
    pose_id: int
    R: np.array
    T: np.array
    K: np.array
    FovY: np.array
    FovX: np.array
    image: np.array
    image_path: str
    image_name: str
    bkgd_mask: np.array
    bound_mask: np.array
    width: int
    height: int
    smpl_param: dict
    world_vertex: np.array
    world_bound: np.array
    big_pose_smpl_param: dict
    big_pose_world_vertex: np.array
    big_pose_world_bound: np.array

class SceneInfo(NamedTuple):
    point_cloud: BasicPointCloud
    train_cameras: list
    test_cameras: list
    nerf_normalization: dict
    ply_path: str

def getNerfppNorm(cam_info):
    def get_center_and_diag(cam_centers):
        cam_centers = np.hstack(cam_centers)
        avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True)
        center = avg_cam_center
        dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True)
        diagonal = np.max(dist)
        return center.flatten(), diagonal

    cam_centers = []
    for cam in cam_info:
#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact george.drettakis@inria.fr
#

class CameraInfo(NamedTuple):
    uid: int
    pose_id: int
    R: np.array
    T: np.array
    K: np.array
    FovY: np.array
    FovX: np.array
    image: np.array
    image_path: str
    image_name: str
    bkgd_mask: np.array
    bound_mask: np.array
    width: int
    height: int
    smpl_param: dict
    world_vertex: np.array
    world_bound: np.array
    big_pose_smpl_param: dict
    big_pose_world_vertex: np.array
    big_pose_world_bound: np.array

class SceneInfo(NamedTuple):
    point_cloud: BasicPointCloud
    train_cameras: list
    test_cameras: list
    nerf_normalization: dict
    ply_path: str

def getNerfppNorm(cam_info):
    def get_center_and_diag(cam_centers):
        cam_centers = np.hstack(cam_centers)
        avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True)
        center = avg_cam_center
        dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True)
        diagonal = np.max(dist)
        return center.flatten(), diagonal

    cam_centers = []
    for cam in cam_info:
W2C = getWorld2View2(cam.R, cam.T)
7
2023-11-29 07:10:39+00:00
16k
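The getNerfppNorm crop above stops right at the gold line (W2C = getWorld2View2(cam.R, cam.T)). As a hedged sketch of the conventional continuation, offered only as an assumption since the row reveals nothing past that first gold line, each world-to-view matrix is inverted and its translation column taken as the camera center before get_center_and_diag runs:

import numpy as np

# Hypothetical continuation (assumption): recover a camera center from a
# 4x4 world-to-view matrix by inverting it and reading the translation.
def camera_center(W2C: np.ndarray) -> np.ndarray:
    C2W = np.linalg.inv(W2C)       # view-to-world
    return C2W[:3, 3:4]            # 3x1 world-space camera position

W2C = np.eye(4)
W2C[:3, 3] = [0.0, 0.0, 2.0]       # toy camera translated 2 units along +z
print(camera_center(W2C).ravel())  # [ 0.  0. -2.]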
emdgroup/baybe
tests/simulate_telemetry.py
[ { "identifier": "Campaign", "path": "baybe/campaign.py", "snippet": "class Campaign(SerialMixin):\n \"\"\"Main class for interaction with BayBE.\n\n Campaigns define and record an experimentation process, i.e. the execution of a\n series of measurements and the iterative sequence of events invo...
import os from random import randint from baybe.campaign import Campaign from baybe.objective import Objective from baybe.parameters import NumericalDiscreteParameter, SubstanceParameter from baybe.recommenders import RandomRecommender, SequentialGreedyRecommender from baybe.searchspace import SearchSpace from baybe.strategies import TwoPhaseStrategy from baybe.targets import NumericalTarget from baybe.telemetry import ( VARNAME_TELEMETRY_ENABLED, VARNAME_TELEMETRY_USERNAME, get_user_details, ) from baybe.utils.dataframe import add_fake_results
13,929
"""Simulate different users and telemetry settings. This script does some calls so that the results can be viewed on AWS CloudWatch. """ dict_solvent = { "DMAc": r"CC(N(C)C)=O", "Butyornitrile": r"CCCC#N", "Butyl Ester": r"CCCCOC(C)=O", "p-Xylene": r"CC1=CC=C(C)C=C1", } dict_base = { "Potassium acetate": r"O=C([O-])C.[K+]", "Potassium pivalate": r"O=C([O-])C(C)(C)C.[K+]", "Cesium acetate": r"O=C([O-])C.[Cs+]", "Cesium pivalate": r"O=C([O-])C(C)(C)C.[Cs+]", } dict_ligand = { "BrettPhos": r"CC(C)C1=CC(C(C)C)=C(C(C(C)C)=C1)C2=C(P(C3CCCCC3)C4CCCCC4)C(OC)=" "CC=C2OC", "Di-tert-butylphenylphosphine": r"CC(C)(C)P(C1=CC=CC=C1)C(C)(C)C", "(t-Bu)PhCPhos": r"CN(C)C1=CC=CC(N(C)C)=C1C2=CC=CC=C2P(C(C)(C)C)C3=CC=CC=C3", "Tricyclohexylphosphine": r"P(C1CCCCC1)(C2CCCCC2)C3CCCCC3", "PPh3": r"P(C1=CC=CC=C1)(C2=CC=CC=C2)C3=CC=CC=C3", "XPhos": r"CC(C1=C(C2=CC=CC=C2P(C3CCCCC3)C4CCCCC4)C(C(C)C)=CC(C(C)C)=C1)C", "P(2-furyl)3": r"P(C1=CC=CO1)(C2=CC=CO2)C3=CC=CO3", "Methyldiphenylphosphine": r"CP(C1=CC=CC=C1)C2=CC=CC=C2", "1268824-69-6": r"CC(OC1=C(P(C2CCCCC2)C3CCCCC3)C(OC(C)C)=CC=C1)C", "JackiePhos": r"FC(F)(F)C1=CC(P(C2=C(C3=C(C(C)C)C=C(C(C)C)C=C3C(C)C)C(OC)=CC=C2OC)" r"C4=CC(C(F)(F)F)=CC(C(F)(F)F)=C4)=CC(C(F)(F)F)=C1", "SCHEMBL15068049": r"C[C@]1(O2)O[C@](C[C@]2(C)P3C4=CC=CC=C4)(C)O[C@]3(C)C1", "Me2PPh": r"CP(C)C1=CC=CC=C1", } parameters = [ SubstanceParameter(name="Solvent", data=dict_solvent, encoding="MORDRED"), SubstanceParameter(name="Base", data=dict_base, encoding="MORDRED"), SubstanceParameter(name="Ligand", data=dict_ligand, encoding="MORDRED"), NumericalDiscreteParameter(name="Temp_C", values=[90, 105, 120], tolerance=2), NumericalDiscreteParameter( name="Concentration", values=[0.057, 0.1, 0.153], tolerance=0.005 ), ] config = { "searchspace": SearchSpace.from_product( parameters=parameters, constraints=None, ), "objective": Objective(
"""Simulate different users and telemetry settings. This script does some calls so that the results can be viewed on AWS CloudWatch. """ dict_solvent = { "DMAc": r"CC(N(C)C)=O", "Butyornitrile": r"CCCC#N", "Butyl Ester": r"CCCCOC(C)=O", "p-Xylene": r"CC1=CC=C(C)C=C1", } dict_base = { "Potassium acetate": r"O=C([O-])C.[K+]", "Potassium pivalate": r"O=C([O-])C(C)(C)C.[K+]", "Cesium acetate": r"O=C([O-])C.[Cs+]", "Cesium pivalate": r"O=C([O-])C(C)(C)C.[Cs+]", } dict_ligand = { "BrettPhos": r"CC(C)C1=CC(C(C)C)=C(C(C(C)C)=C1)C2=C(P(C3CCCCC3)C4CCCCC4)C(OC)=" "CC=C2OC", "Di-tert-butylphenylphosphine": r"CC(C)(C)P(C1=CC=CC=C1)C(C)(C)C", "(t-Bu)PhCPhos": r"CN(C)C1=CC=CC(N(C)C)=C1C2=CC=CC=C2P(C(C)(C)C)C3=CC=CC=C3", "Tricyclohexylphosphine": r"P(C1CCCCC1)(C2CCCCC2)C3CCCCC3", "PPh3": r"P(C1=CC=CC=C1)(C2=CC=CC=C2)C3=CC=CC=C3", "XPhos": r"CC(C1=C(C2=CC=CC=C2P(C3CCCCC3)C4CCCCC4)C(C(C)C)=CC(C(C)C)=C1)C", "P(2-furyl)3": r"P(C1=CC=CO1)(C2=CC=CO2)C3=CC=CO3", "Methyldiphenylphosphine": r"CP(C1=CC=CC=C1)C2=CC=CC=C2", "1268824-69-6": r"CC(OC1=C(P(C2CCCCC2)C3CCCCC3)C(OC(C)C)=CC=C1)C", "JackiePhos": r"FC(F)(F)C1=CC(P(C2=C(C3=C(C(C)C)C=C(C(C)C)C=C3C(C)C)C(OC)=CC=C2OC)" r"C4=CC(C(F)(F)F)=CC(C(F)(F)F)=C4)=CC(C(F)(F)F)=C1", "SCHEMBL15068049": r"C[C@]1(O2)O[C@](C[C@]2(C)P3C4=CC=CC=C4)(C)O[C@]3(C)C1", "Me2PPh": r"CP(C)C1=CC=CC=C1", } parameters = [ SubstanceParameter(name="Solvent", data=dict_solvent, encoding="MORDRED"), SubstanceParameter(name="Base", data=dict_base, encoding="MORDRED"), SubstanceParameter(name="Ligand", data=dict_ligand, encoding="MORDRED"), NumericalDiscreteParameter(name="Temp_C", values=[90, 105, 120], tolerance=2), NumericalDiscreteParameter( name="Concentration", values=[0.057, 0.1, 0.153], tolerance=0.005 ), ] config = { "searchspace": SearchSpace.from_product( parameters=parameters, constraints=None, ), "objective": Objective(
mode="SINGLE", targets=[NumericalTarget(name="Yield", mode="MAX")]
8
2023-11-27 17:02:40+00:00
16k
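Each row above pairs a cropped_code prefix with a gold next_line, which makes exact-match next-line prediction the natural way to consume them. A minimal sketch under that assumption (the toy row reuses the GauHuman example; the predictor is a placeholder, not a real model):

# Exact-match scoring for rows shaped like the ones above.
# Field names (cropped_code, next_line) come from the rows themselves;
# the toy row and the trivial predictor are illustrative assumptions.
def exact_match(row: dict, predict) -> bool:
    return predict(row["cropped_code"]).strip() == row["next_line"].strip()

row = {
    "cropped_code": "    cam_centers = []\n    for cam in cam_info:\n",
    "next_line": "W2C = getWorld2View2(cam.R, cam.T)",
}
print(exact_match(row, lambda code: "W2C = getWorld2View2(cam.R, cam.T)"))  # True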
UX-Decoder/LLaVA-Grounding
llava/model/language_model/llava_llama_gd.py
[ { "identifier": "LlavaMetaModel", "path": "llava/model/llava_arch.py", "snippet": "class LlavaMetaModel:\n\n def __init__(self, config):\n super(LlavaMetaModel, self).__init__(config)\n\n if hasattr(config, \"mm_vision_tower\"):\n self.vision_tower = build_vision_tower(config...
from typing import List, Optional, Tuple, Union from torch.nn import CrossEntropyLoss from transformers import AutoConfig, AutoModelForCausalLM, \ LlamaConfig, LlamaModel, LlamaForCausalLM from transformers.modeling_outputs import CausalLMOutputWithPast from ..llava_arch import LlavaMetaModel, LlavaMetaForCausalLM, LlavaMetaForCausalLM_gd,LlavaMetaForCausalLM_gd_interactive import torch import torch.nn as nn import transformers
13,049
) if 'image_clip' in instances[0]: images = [instance['image_clip'] for instance in instances] if all(x is not None and x.shape == images[0].shape for x in images): batch['images'] = torch.stack(images) else: batch['images'] = images return batch class LlavaConfig(LlamaConfig): model_type = "llava" class LlavaLlamaModel(LlavaMetaModel, LlamaModel): config_class = LlavaConfig def __init__(self, config: LlamaConfig): super(LlavaLlamaModel, self).__init__(config) class LlavaLlamaForCausalLM(LlamaForCausalLM, LlavaMetaForCausalLM): config_class = LlavaConfig def __init__(self, config): super(LlamaForCausalLM, self).__init__(config) self.model = LlavaLlamaModel(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_model(self): return self.model def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, images: Optional[torch.FloatTensor] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict input_ids, attention_mask, past_key_values, inputs_embeds, labels = self.prepare_inputs_labels_for_multimodal(input_ids, attention_mask, past_key_values, labels, images) # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict ) hidden_states = outputs[0] logits = self.lm_head(hidden_states) loss = None if labels is not None: # Shift so that tokens < n predict n shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() shift_logits = shift_logits.view(-1, self.config.vocab_size) shift_labels = shift_labels.view(-1) # Enable model/pipeline parallelism shift_labels = shift_labels.to(shift_logits.device) loss = loss_fct(shift_logits, shift_labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs ): if past_key_values: input_ids = input_ids[:, -1:] # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and past_key_values is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} model_inputs.update( { "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "attention_mask": attention_mask, "images": kwargs.get("images", None), } ) return model_inputs
# Copyright 2023 Haotian Liu # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. IGNORE_INDEX=-100 # @dataclass class DataCollatorForSupervisedDataset(object): """Collate examples for supervised fine-tuning.""" # tokenizer: transformers.PreTrainedTokenizer def __call__(self, instances,tokenizer): input_ids, labels = tuple([instance[key] for instance in instances] for key in ("input_ids", "labels")) input_ids = torch.nn.utils.rnn.pad_sequence( input_ids, batch_first=True, padding_value=tokenizer.pad_token_id) labels = torch.nn.utils.rnn.pad_sequence(labels, batch_first=True, padding_value=IGNORE_INDEX) input_ids = input_ids[:, :tokenizer.model_max_length] labels = labels[:, :tokenizer.model_max_length] batch = dict( input_ids=input_ids, labels=labels, attention_mask=input_ids.ne(tokenizer.pad_token_id), ) if 'image_clip' in instances[0]: images = [instance['image_clip'] for instance in instances] if all(x is not None and x.shape == images[0].shape for x in images): batch['images'] = torch.stack(images) else: batch['images'] = images return batch class LlavaConfig(LlamaConfig): model_type = "llava" class LlavaLlamaModel(LlavaMetaModel, LlamaModel): config_class = LlavaConfig def __init__(self, config: LlamaConfig): super(LlavaLlamaModel, self).__init__(config) class LlavaLlamaForCausalLM(LlamaForCausalLM, LlavaMetaForCausalLM): config_class = LlavaConfig def __init__(self, config): super(LlamaForCausalLM, self).__init__(config) self.model = LlavaLlamaModel(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_model(self): return self.model def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, images: Optional[torch.FloatTensor] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict input_ids, attention_mask, past_key_values, inputs_embeds, labels = self.prepare_inputs_labels_for_multimodal(input_ids, attention_mask, past_key_values, labels, images) # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict ) hidden_states = outputs[0] logits = self.lm_head(hidden_states) loss = None if labels is not None: # Shift so that tokens < n predict n shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() shift_logits = shift_logits.view(-1, self.config.vocab_size) shift_labels = shift_labels.view(-1) # Enable model/pipeline parallelism shift_labels = shift_labels.to(shift_logits.device) loss = loss_fct(shift_logits, shift_labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs ): if past_key_values: input_ids = input_ids[:, -1:] # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and past_key_values is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} model_inputs.update( { "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "attention_mask": attention_mask, "images": kwargs.get("images", None), } ) return model_inputs
class LlavaLlamaForCausalLM_gd(LlamaForCausalLM, LlavaMetaForCausalLM_gd):
2
2023-12-04 10:59:21+00:00
16k
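The forward pass in this record computes the standard causal-LM objective: logits are shifted left and labels shifted right so that position t predicts token t+1, and positions labeled IGNORE_INDEX (-100, the padding value used by the record's collator and also CrossEntropyLoss's default ignore_index) drop out of the loss. A self-contained sketch of just that step, with made-up tensor sizes (nothing here is the record's actual model):

# Illustrative sketch, not part of the record: shifted next-token cross-entropy.
import torch
from torch.nn import CrossEntropyLoss

batch, seq_len, vocab = 2, 8, 100
logits = torch.randn(batch, seq_len, vocab)
labels = torch.randint(0, vocab, (batch, seq_len))
labels[:, :3] = -100  # e.g. prompt/image tokens excluded from the loss

# Shift so that tokens < n predict n, exactly as in the record's forward().
shift_logits = logits[..., :-1, :].contiguous().view(-1, vocab)
shift_labels = labels[..., 1:].contiguous().view(-1)

loss = CrossEntropyLoss()(shift_logits, shift_labels)  # ignore_index defaults to -100
print(loss.item())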
daveredrum/SceneTex
models/pipeline/texture_pipeline.py
[ { "identifier": "TextureMesh", "path": "models/modules/meshes.py", "snippet": "class TextureMesh(nn.Module):\n def __init__(self, \n config,\n device\n ): \n \n super().__init__()\n \n self.config = config\n self.device = device\n\n self.num_...
import random import wandb import json import os import time import torch import torch.nn as nn import torch.nn.functional as F import torchvision import numpy as np import pytorch_lightning as pl import matplotlib.pyplot as plt import sys import open_clip from torch.optim import Adam, AdamW from torch.optim.lr_scheduler import LinearLR from omegaconf import OmegaConf from tqdm import tqdm from omegaconf import OmegaConf from PIL import Image from copy import deepcopy from pathlib import Path from pytorch3d.io import ( load_obj, load_objs_as_meshes ) from pytorch3d.renderer import TexturesUV from pytorch3d.ops import interpolate_face_attributes from models.modules import TextureMesh, Studio, Guidance
10,837
# mat # customized sys.path.append("./lib") class TexturePipeline(nn.Module): def __init__(self, config, stamp, device ): super().__init__() self.config = config self.stamp = stamp self.prompt = config.prompt + ", " + config.a_prompt if config.a_prompt else config.prompt self.n_prompt = config.n_prompt self.device = device self.weights_dtype = torch.float16 if self.config.enable_half_precision else torch.float32 print("=> Use precision: {}".format(self.weights_dtype)) pl.seed_everything(self.config.seed) """call this after to(device)""" def configure(self, inference_mode=False): if not inference_mode: self.log_name = "_".join(self.config.prompt.split(' ')) self.log_stamp = self.stamp self.log_dir = os.path.join(self.config.log_dir, self.log_name, self.config.loss_type, self.log_stamp) # override config self.config.log_name = self.log_name self.config.log_stamp = self.log_stamp self.config.log_dir = self.log_dir # 3D assets self._init_mesh() # studio self._init_studio() # instances self._init_anchors() if not inference_mode: # diffusion self._init_guidance() # optimization self._configure_optimizers() self._init_logger() if self.config.enable_clip_benchmark: self.clip, _, self.clip_preprocess = open_clip.create_model_and_transforms('ViT-B-32', pretrained='laion2b_s34b_b79k') self.clip_tokenizer = open_clip.get_tokenizer('ViT-B-32') def _init_studio(self): self.studio = Studio(self.config, self.device) def _init_mesh(self):
# mat # customized sys.path.append("./lib") class TexturePipeline(nn.Module): def __init__(self, config, stamp, device ): super().__init__() self.config = config self.stamp = stamp self.prompt = config.prompt + ", " + config.a_prompt if config.a_prompt else config.prompt self.n_prompt = config.n_prompt self.device = device self.weights_dtype = torch.float16 if self.config.enable_half_precision else torch.float32 print("=> Use precision: {}".format(self.weights_dtype)) pl.seed_everything(self.config.seed) """call this after to(device)""" def configure(self, inference_mode=False): if not inference_mode: self.log_name = "_".join(self.config.prompt.split(' ')) self.log_stamp = self.stamp self.log_dir = os.path.join(self.config.log_dir, self.log_name, self.config.loss_type, self.log_stamp) # override config self.config.log_name = self.log_name self.config.log_stamp = self.log_stamp self.config.log_dir = self.log_dir # 3D assets self._init_mesh() # studio self._init_studio() # instances self._init_anchors() if not inference_mode: # diffusion self._init_guidance() # optimization self._configure_optimizers() self._init_logger() if self.config.enable_clip_benchmark: self.clip, _, self.clip_preprocess = open_clip.create_model_and_transforms('ViT-B-32', pretrained='laion2b_s34b_b79k') self.clip_tokenizer = open_clip.get_tokenizer('ViT-B-32') def _init_studio(self): self.studio = Studio(self.config, self.device) def _init_mesh(self):
self.texture_mesh = TextureMesh(self.config, self.device)
0
2023-11-28 15:38:40+00:00
16k
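TexturePipeline above keeps its __init__ cheap and defers all heavy setup (mesh, studio, guidance, optimizers) to configure(), which its docstring says to "call this after to(device)". A minimal sketch of that two-phase initialization pattern, with hypothetical names:

# Illustrative sketch, not part of the record: defer heavy setup until the
# caller has fixed the device, mirroring the configure() pattern above.
import torch
import torch.nn as nn

class LazyPipeline(nn.Module):
    def __init__(self, config, device):
        super().__init__()
        self.config = config   # cheap: only store settings here
        self.device = device

    def configure(self):
        # heavy assets (meshes, renderers, guidance models) are built here
        self.backbone = nn.Linear(8, 8).to(self.device)

device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = LazyPipeline(config={}, device=device).to(device)
pipe.configure()
print(pipe.backbone.weight.device)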
HyeonHo99/Video-Motion-Customization
showone/models/unet_3d_condition.py
[ { "identifier": "TransformerTemporalModel", "path": "showone/models/transformer_temporal.py", "snippet": "class TransformerTemporalModel(ModelMixin, ConfigMixin):\n \"\"\"\n A Transformer model for video-like data.\n\n Parameters:\n num_attention_heads (`int`, *optional*, defaults to 16)...
from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Union from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.loaders import UNet2DConditionLoadersMixin from diffusers.utils import BaseOutput, logging from diffusers.models.activations import get_activation from diffusers.models.attention_processor import AttentionProcessor, AttnProcessor from diffusers.models.embeddings import ( GaussianFourierProjection, ImageHintTimeEmbedding, ImageProjection, ImageTimeEmbedding, TextImageProjection, TextImageTimeEmbedding, TextTimeEmbedding, TimestepEmbedding, Timesteps, ) from diffusers.models.modeling_utils import ModelMixin from .transformer_temporal import TransformerTemporalModel from .unet_3d_blocks import ( CrossAttnDownBlock3D, CrossAttnUpBlock3D, DownBlock3D, UNetMidBlock3DCrossAttn, UNetMidBlock3DSimpleCrossAttn, UpBlock3D, get_down_block, get_up_block, ) from diffusers.utils import WEIGHTS_NAME import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint import os, json
13,790
# The projection `class_embed_type` is the same as the timestep `class_embed_type` except # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings # 2. it projects from an arbitrary input dimension. # # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations. # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings. # As a result, `TimestepEmbedding` can be passed arbitrary vectors. self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) elif class_embed_type == "simple_projection": if projection_class_embeddings_input_dim is None: raise ValueError( "`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set" ) self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim) else: self.class_embedding = None if addition_embed_type == "text": if encoder_hid_dim is not None: text_time_embedding_from_dim = encoder_hid_dim else: text_time_embedding_from_dim = cross_attention_dim self.add_embedding = TextTimeEmbedding( text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads ) elif addition_embed_type == "text_image": # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use # case when `addition_embed_type == "text_image"` (Kandinsky 2.1)` self.add_embedding = TextImageTimeEmbedding( text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim ) elif addition_embed_type == "text_time": self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift) self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) elif addition_embed_type == "image": # Kandinsky 2.2 self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) elif addition_embed_type == "image_hint": # Kandinsky 2.2 ControlNet self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) elif addition_embed_type is not None: raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.") if time_embedding_act_fn is None: self.time_embed_act = None else: self.time_embed_act = get_activation(time_embedding_act_fn) self.down_blocks = nn.ModuleList([]) self.up_blocks = nn.ModuleList([]) if isinstance(only_cross_attention, bool): if mid_block_only_cross_attention is None: mid_block_only_cross_attention = only_cross_attention only_cross_attention = [only_cross_attention] * len(down_block_types) if mid_block_only_cross_attention is None: mid_block_only_cross_attention = False if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(down_block_types) if isinstance(attention_head_dim, int): attention_head_dim = (attention_head_dim,) * len(down_block_types) if isinstance(cross_attention_dim, int): cross_attention_dim = (cross_attention_dim,) * len(down_block_types) if isinstance(layers_per_block, int): layers_per_block = [layers_per_block] * len(down_block_types) if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) if class_embeddings_concat: # The time embeddings are concatenated with the class embeddings. The dimension of the # time embeddings passed to the down, middle, and up blocks is twice the dimension of the # regular time embeddings blocks_time_embed_dim = time_embed_dim * 2 else: blocks_time_embed_dim = time_embed_dim # down output_channel = block_out_channels[0] for i, down_block_type in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 down_block = get_down_block( down_block_type, num_layers=layers_per_block[i], transformer_layers_per_block=transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, temb_channels=blocks_time_embed_dim, add_downsample=not is_final_block, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim[i], num_attention_heads=num_attention_heads[i], downsample_padding=downsample_padding, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, resnet_skip_time_act=resnet_skip_time_act, resnet_out_scale_factor=resnet_out_scale_factor, cross_attention_norm=cross_attention_norm, attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, ) self.down_blocks.append(down_block) # mid if mid_block_type == "UNetMidBlock3DCrossAttn":
# Copyright 2023 Alibaba DAMO-VILAB and The HuggingFace Team. All rights reserved. # Copyright 2023 The ModelScope Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from diffusers.models.transformer_temporal import TransformerTemporalModel logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class UNet3DConditionOutput(BaseOutput): """ Args: sample (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`): Hidden states conditioned on `encoder_hidden_states` input. Output of last layer of model. """ sample: torch.FloatTensor class UNet3DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin): r""" UNet3DConditionModel is a conditional 3D UNet model that takes in a noisy sample, conditional state, and a timestep and returns sample shaped output. This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library implements for all the models (such as downloading or saving, etc.) Parameters: sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`): Height and width of input/output sample. in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample. out_channels (`int`, *optional*, defaults to 4): The number of channels in the output. down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`): The tuple of downsample blocks to use. up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D",)`): The tuple of upsample blocks to use. block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): The tuple of output channels for each block. layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block. downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution. mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block. act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use. norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization. If `None`, it will skip the normalization and activation layers in post-processing norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization. cross_attention_dim (`int`, *optional*, defaults to 1280): The dimension of the cross attention features. attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads. """ _supports_gradient_checkpointing = True @register_to_config def __init__( self, sample_size: Optional[int] = None, in_channels: int = 4, out_channels: int = 4, center_input_sample: bool = False, flip_sin_to_cos: bool = True, freq_shift: int = 0, down_block_types: Tuple[str] = ( "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D", ), mid_block_type: Optional[str] = "UNetMidBlock3DCrossAttn", up_block_types: Tuple[str] = ("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), only_cross_attention: Union[bool, Tuple[bool]] = False, block_out_channels: Tuple[int] = (320, 640, 1280, 1280), layers_per_block: Union[int, Tuple[int]] = 2, downsample_padding: int = 1, mid_block_scale_factor: float = 1, act_fn: str = "silu", norm_num_groups: Optional[int] = 32, norm_eps: float = 1e-5, cross_attention_dim: Union[int, Tuple[int]] = 1280, transformer_layers_per_block: Union[int, Tuple[int]] = 1, encoder_hid_dim: Optional[int] = None, encoder_hid_dim_type: Optional[str] = None, attention_head_dim: Union[int, Tuple[int]] = 8, num_attention_heads: Optional[Union[int, Tuple[int]]] = None, dual_cross_attention: bool = False, use_linear_projection: bool = False, class_embed_type: Optional[str] = None, addition_embed_type: Optional[str] = None, addition_time_embed_dim: Optional[int] = None, num_class_embeds: Optional[int] = None, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", resnet_skip_time_act: bool = False, resnet_out_scale_factor: int = 1.0, time_embedding_type: str = "positional", time_embedding_dim: Optional[int] = None, time_embedding_act_fn: Optional[str] = None, timestep_post_act: Optional[str] = None, time_cond_proj_dim: Optional[int] = None, conv_in_kernel: int = 3, conv_out_kernel: int = 3, projection_class_embeddings_input_dim: Optional[int] = None, class_embeddings_concat: bool = False, mid_block_only_cross_attention: Optional[bool] = None, cross_attention_norm: Optional[str] = None, addition_embed_type_num_heads=64, transfromer_in_opt: bool =False, ): super().__init__() self.sample_size = sample_size self.transformer_in_opt = transfromer_in_opt if num_attention_heads is not None: raise ValueError( "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. num_attention_heads = num_attention_heads or attention_head_dim # Check inputs if len(down_block_types) != len(up_block_types): raise ValueError( f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}." ) if len(block_out_channels) != len(down_block_types): raise ValueError( f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." ) if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types): raise ValueError( f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}." ) if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): raise ValueError( f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}." ) if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types): raise ValueError( f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}." ) if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types): raise ValueError( f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}." ) if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types): raise ValueError( f"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}." ) # input conv_in_padding = (conv_in_kernel - 1) // 2 self.conv_in = nn.Conv2d( in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding ) if self.transformer_in_opt: self.transformer_in = TransformerTemporalModel( num_attention_heads=8, attention_head_dim=64, in_channels=block_out_channels[0], num_layers=1, ) # time if time_embedding_type == "fourier": time_embed_dim = time_embedding_dim or block_out_channels[0] * 2 if time_embed_dim % 2 != 0: raise ValueError(f"`time_embed_dim` should be divisible by 2, but is {time_embed_dim}.") self.time_proj = GaussianFourierProjection( time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos ) timestep_input_dim = time_embed_dim elif time_embedding_type == "positional": time_embed_dim = time_embedding_dim or block_out_channels[0] * 4 self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) timestep_input_dim = block_out_channels[0] else: raise ValueError( f"{time_embedding_type} does not exist. Please make sure to use one of `fourier` or `positional`." ) self.time_embedding = TimestepEmbedding( timestep_input_dim, time_embed_dim, act_fn=act_fn, post_act_fn=timestep_post_act, cond_proj_dim=time_cond_proj_dim, ) if encoder_hid_dim_type is None and encoder_hid_dim is not None: encoder_hid_dim_type = "text_proj" self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type) logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.") if encoder_hid_dim is None and encoder_hid_dim_type is not None: raise ValueError( f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}." ) if encoder_hid_dim_type == "text_proj": self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim) elif encoder_hid_dim_type == "text_image_proj": # image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use # case when `addition_embed_type == "text_image_proj"` (Kandinsky 2.1)` self.encoder_hid_proj = TextImageProjection( text_embed_dim=encoder_hid_dim, image_embed_dim=cross_attention_dim, cross_attention_dim=cross_attention_dim, ) elif encoder_hid_dim_type == "image_proj": # Kandinsky 2.2 self.encoder_hid_proj = ImageProjection( image_embed_dim=encoder_hid_dim, cross_attention_dim=cross_attention_dim, ) elif encoder_hid_dim_type is not None: raise ValueError( f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'." ) else: self.encoder_hid_proj = None # class embedding if class_embed_type is None and num_class_embeds is not None: self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) elif class_embed_type == "timestep": self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn) elif class_embed_type == "identity": self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) elif class_embed_type == "projection": if projection_class_embeddings_input_dim is None: raise ValueError( "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set" ) # The projection `class_embed_type` is the same as the timestep `class_embed_type` except # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings # 2. it projects from an arbitrary input dimension. # # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations. # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings. # As a result, `TimestepEmbedding` can be passed arbitrary vectors. self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) elif class_embed_type == "simple_projection": if projection_class_embeddings_input_dim is None: raise ValueError( "`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set" ) self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim) else: self.class_embedding = None if addition_embed_type == "text": if encoder_hid_dim is not None: text_time_embedding_from_dim = encoder_hid_dim else: text_time_embedding_from_dim = cross_attention_dim self.add_embedding = TextTimeEmbedding( text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads ) elif addition_embed_type == "text_image": # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use # case when `addition_embed_type == "text_image"` (Kandinsky 2.1)` self.add_embedding = TextImageTimeEmbedding( text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim ) elif addition_embed_type == "text_time": self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift) self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) elif addition_embed_type == "image": # Kandinsky 2.2 self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) elif addition_embed_type == "image_hint": # Kandinsky 2.2 ControlNet self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) elif addition_embed_type is not None: raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.") if time_embedding_act_fn is None: self.time_embed_act = None else: self.time_embed_act = get_activation(time_embedding_act_fn) self.down_blocks = nn.ModuleList([]) self.up_blocks = nn.ModuleList([]) if isinstance(only_cross_attention, bool): if mid_block_only_cross_attention is None: mid_block_only_cross_attention = only_cross_attention only_cross_attention = [only_cross_attention] * len(down_block_types) if mid_block_only_cross_attention is None: mid_block_only_cross_attention = False if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(down_block_types) if isinstance(attention_head_dim, int): attention_head_dim = (attention_head_dim,) * len(down_block_types) if isinstance(cross_attention_dim, int): cross_attention_dim = (cross_attention_dim,) * len(down_block_types) if isinstance(layers_per_block, int): layers_per_block = [layers_per_block] * len(down_block_types) if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) if class_embeddings_concat: # The time embeddings are concatenated with the class embeddings. The dimension of the # time embeddings passed to the down, middle, and up blocks is twice the dimension of the # regular time embeddings blocks_time_embed_dim = time_embed_dim * 2 else: blocks_time_embed_dim = time_embed_dim # down output_channel = block_out_channels[0] for i, down_block_type in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 down_block = get_down_block( down_block_type, num_layers=layers_per_block[i], transformer_layers_per_block=transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, temb_channels=blocks_time_embed_dim, add_downsample=not is_final_block, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim[i], num_attention_heads=num_attention_heads[i], downsample_padding=downsample_padding, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, resnet_skip_time_act=resnet_skip_time_act, resnet_out_scale_factor=resnet_out_scale_factor, cross_attention_norm=cross_attention_norm, attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, ) self.down_blocks.append(down_block) # mid if mid_block_type == "UNetMidBlock3DCrossAttn":
self.mid_block = UNetMidBlock3DCrossAttn(
4
2023-11-29 17:23:45+00:00
16k
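A recurring idiom in the __init__ above is broadcasting a scalar hyperparameter to one value per down block (num_attention_heads, attention_head_dim, cross_attention_dim, layers_per_block, transformer_layers_per_block all get this treatment). The same normalization in isolation, with illustrative values:

# Illustrative sketch, not part of the record: scalar-or-sequence config
# normalized to one entry per block, as in the UNet __init__ above.
def per_block(value, num_blocks):
    if isinstance(value, int):
        return (value,) * num_blocks   # broadcast a single int to all blocks
    assert len(value) == num_blocks, "need one value per block"
    return tuple(value)

down_block_types = ("CrossAttnDownBlock3D",) * 3 + ("DownBlock3D",)
print(per_block(8, len(down_block_types)))             # (8, 8, 8, 8)
print(per_block([1, 2, 4, 8], len(down_block_types)))  # (1, 2, 4, 8)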
xmu-xiaoma666/X-Dreamer
train_x_dreamer.py
[ { "identifier": "DatasetMesh", "path": "dataset/dataset_mesh.py", "snippet": "class DatasetMesh(torch.utils.data.Dataset):\n\n\n def __init__(self, glctx, FLAGS, validate=False, gif=False):\n # Init \n self.glctx = glctx\n self.FLAGS = FLAGS\n sel...
import os import time import argparse import json import math import numpy as np import torch import nvdiffrast.torch as dr import itertools import xatlas import open3d as o3d import random import imageio import os.path as osp import pickle from dataset.dataset_mesh import DatasetMesh from dataset.dataset_mesh import get_camera_params from geometry.dmtet_x_dreamer import DMTetGeometry from geometry.dlmesh_x_dreamer import DLMesh from render import obj from render import material from render import util from render import mesh from render import texture from render import mlptexture from render import light from render import render from sd_cglora import StableDiffusion from tqdm import tqdm from render import util from render.video import Video
11,894
############################################################################### # Mix background into a dataset image ############################################################################### @torch.no_grad() def prepare_batch(target, background= 'black'): target['mv'] = target['mv'].cuda() target['mvp'] = target['mvp'].cuda() target['campos'] = target['campos'].cuda() target['fov'] = target['fov'].cuda() target['normal_rotate'] = target['normal_rotate'].cuda() batch_size = target['mv'].shape[0] resolution = target['resolution'] if background == 'white': target['background']= torch.ones(batch_size, resolution[0], resolution[1], 3, dtype=torch.float32, device='cuda') if background == 'black': target['background'] = torch.zeros(batch_size, resolution[0], resolution[1], 3, dtype=torch.float32, device='cuda') return target ############################################################################### # UV - map geometry & convert to a mesh ############################################################################### @torch.no_grad() def xatlas_uvmap(glctx, geometry, mat, FLAGS): eval_mesh = geometry.getMesh(mat) # Create uvs with xatlas v_pos = eval_mesh.v_pos.detach().cpu().numpy() t_pos_idx = eval_mesh.t_pos_idx.detach().cpu().numpy() vmapping, indices, uvs = xatlas.parametrize(v_pos, t_pos_idx) # Convert to tensors indices_int64 = indices.astype(np.uint64, casting='same_kind').view(np.int64) uvs = torch.tensor(uvs, dtype=torch.float32, device='cuda') faces = torch.tensor(indices_int64, dtype=torch.int64, device='cuda') new_mesh = mesh.Mesh(v_tex=uvs, t_tex_idx=faces, base=eval_mesh)
############################################################################### # Mix background into a dataset image ############################################################################### @torch.no_grad() def prepare_batch(target, background= 'black'): target['mv'] = target['mv'].cuda() target['mvp'] = target['mvp'].cuda() target['campos'] = target['campos'].cuda() target['fov'] = target['fov'].cuda() target['normal_rotate'] = target['normal_rotate'].cuda() batch_size = target['mv'].shape[0] resolution = target['resolution'] if background == 'white': target['background']= torch.ones(batch_size, resolution[0], resolution[1], 3, dtype=torch.float32, device='cuda') if background == 'black': target['background'] = torch.zeros(batch_size, resolution[0], resolution[1], 3, dtype=torch.float32, device='cuda') return target ############################################################################### # UV - map geometry & convert to a mesh ############################################################################### @torch.no_grad() def xatlas_uvmap(glctx, geometry, mat, FLAGS): eval_mesh = geometry.getMesh(mat) # Create uvs with xatlas v_pos = eval_mesh.v_pos.detach().cpu().numpy() t_pos_idx = eval_mesh.t_pos_idx.detach().cpu().numpy() vmapping, indices, uvs = xatlas.parametrize(v_pos, t_pos_idx) # Convert to tensors indices_int64 = indices.astype(np.uint64, casting='same_kind').view(np.int64) uvs = torch.tensor(uvs, dtype=torch.float32, device='cuda') faces = torch.tensor(indices_int64, dtype=torch.int64, device='cuda') new_mesh = mesh.Mesh(v_tex=uvs, t_tex_idx=faces, base=eval_mesh)
mask, kd, ks, normal = render.render_uv(glctx, new_mesh, FLAGS.texture_res, eval_mesh.material['kd_ks_normal'])
5
2023-11-27 13:44:01+00:00
16k
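xatlas_uvmap above converts the unsigned indices returned by xatlas with .astype(np.uint64, casting='same_kind').view(np.int64), where the final .view is a bit-level reinterpretation of the buffer rather than a value cast, since PyTorch (at least at the time) has no uint64 dtype. The same conversion on dummy data:

# Illustrative sketch, not part of the record: reinterpret uint indices as int64.
import numpy as np
import torch

indices = np.array([[0, 1, 2], [2, 1, 3]], dtype=np.uint32)  # dummy triangles
indices_int64 = indices.astype(np.uint64, casting='same_kind').view(np.int64)
faces = torch.tensor(indices_int64, dtype=torch.int64)
print(faces.dtype, faces.shape)  # torch.int64 torch.Size([2, 3])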
zhenzhiwang/intercontrol
sample/global_joint_control.py
[ { "identifier": "ControlGaussianDiffusion", "path": "diffusion/control_diffusion.py", "snippet": "class ControlGaussianDiffusion(SpacedDiffusion):\n\n def inv_transform(self, data):\n assert self.std is not None and self.mean is not None\n #assert data.requires_grad == True\n std...
from diffusion.control_diffusion import ControlGaussianDiffusion from diffusion.respace import SpacedDiffusion from utils.fixseed import fixseed from utils.parser_util import edit_control_args from utils.model_util import load_controlmdm_and_diffusion from utils import dist_util from model.cfg_sampler import wrap_model from data_loaders.get_data import get_dataset_loader from data_loaders.humanml.scripts.motion_process import recover_from_ric from data_loaders.humanml_utils import get_control_mask, HML_JOINT_NAMES from data_loaders.humanml.utils.plot_script import plot_3d_motion from model.ControlMDM import ControlMDM import os import numpy as np import torch import data_loaders.humanml.utils.paramUtil as paramUtil import shutil
11,120
# This code is based on https://github.com/openai/guided-diffusion """ Generate a large batch of image samples from a model and save them as a large numpy array. This can be used to produce samples for FID evaluation. """ def main(): args = edit_control_args() assert args.multi_person == False, 'multi-person is not supported for this script' fixseed(args.seed) out_path = args.output_dir name = os.path.basename(os.path.dirname(args.model_path)) niter = os.path.basename(args.model_path).replace('model', '').replace('.pt', '') max_frames = 196 if args.dataset in ['kit', 'humanml'] else 60 fps = 12.5 if args.dataset == 'kit' else 20 dist_util.setup_dist(args.device) if out_path == '': out_path = os.path.join(os.path.dirname(args.model_path), 'edit_{}_{}_{}_seed{}'.format(name, niter, args.inpainting_mask, args.seed)) if args.text_condition != '': out_path += '_' + args.text_condition.replace(' ', '_').replace('.', '') print('Loading dataset...') assert args.num_samples <= args.batch_size, \ f'Please either increase batch_size({args.batch_size}) or reduce num_samples({args.num_samples})' # So why do we need this check? In order to protect GPU from a memory overload in the following line. # If your GPU can handle batch size larger then default, you can specify it through --batch_size flag. # If it doesn't, and you still want to sample more prompts, run this script with different seeds # (specify through the --seed flag) args.batch_size = args.num_samples # Sampling a single batch from the testset, with exactly args.num_samples data = get_dataset_loader(name=args.dataset, batch_size=args.batch_size, num_frames=max_frames, split='test', load_mode='train', size=args.num_samples) # in train mode, you get both text and motion. # data.fixed_length = n_frames total_num_samples = args.num_samples * args.num_repetitions print("Creating model and diffusion...") DiffusionClass = ControlGaussianDiffusion if args.filter_noise else SpacedDiffusion model, diffusion = load_controlmdm_and_diffusion(args, data, dist_util.dev(), ModelClass=ControlMDM, DiffusionClass=DiffusionClass) diffusion.mean = data.dataset.t2m_dataset.mean diffusion.std = data.dataset.t2m_dataset.std iterator = iter(data) input_motions, model_kwargs = next(iterator) input_motions = input_motions.to(dist_util.dev()) if args.text_condition != '': texts = [args.text_condition] * args.num_samples model_kwargs['y']['text'] = texts # add inpainting mask according to args control_joint = 'right_wrist' assert max_frames == input_motions.shape[-1] gt_frames_per_sample = {} n_joints = 22 if input_motions.shape[1] == 263 else 21 unnormalized_motion = data.dataset.t2m_dataset.inv_transform_torch(input_motions.permute(0, 2, 3, 1)).float() global_joints = recover_from_ric(unnormalized_motion, n_joints) global_joints = global_joints.view(-1, *global_joints.shape[2:]).permute(0, 2, 3, 1) global_joints.requires_grad = False model_kwargs['y']['global_joint'] = global_joints
# This code is based on https://github.com/openai/guided-diffusion """ Generate a large batch of image samples from a model and save them as a large numpy array. This can be used to produce samples for FID evaluation. """ def main(): args = edit_control_args() assert args.multi_person == False, 'multi-person is not supported for this script' fixseed(args.seed) out_path = args.output_dir name = os.path.basename(os.path.dirname(args.model_path)) niter = os.path.basename(args.model_path).replace('model', '').replace('.pt', '') max_frames = 196 if args.dataset in ['kit', 'humanml'] else 60 fps = 12.5 if args.dataset == 'kit' else 20 dist_util.setup_dist(args.device) if out_path == '': out_path = os.path.join(os.path.dirname(args.model_path), 'edit_{}_{}_{}_seed{}'.format(name, niter, args.inpainting_mask, args.seed)) if args.text_condition != '': out_path += '_' + args.text_condition.replace(' ', '_').replace('.', '') print('Loading dataset...') assert args.num_samples <= args.batch_size, \ f'Please either increase batch_size({args.batch_size}) or reduce num_samples({args.num_samples})' # So why do we need this check? In order to protect GPU from a memory overload in the following line. # If your GPU can handle batch size larger then default, you can specify it through --batch_size flag. # If it doesn't, and you still want to sample more prompts, run this script with different seeds # (specify through the --seed flag) args.batch_size = args.num_samples # Sampling a single batch from the testset, with exactly args.num_samples data = get_dataset_loader(name=args.dataset, batch_size=args.batch_size, num_frames=max_frames, split='test', load_mode='train', size=args.num_samples) # in train mode, you get both text and motion. # data.fixed_length = n_frames total_num_samples = args.num_samples * args.num_repetitions print("Creating model and diffusion...") DiffusionClass = ControlGaussianDiffusion if args.filter_noise else SpacedDiffusion model, diffusion = load_controlmdm_and_diffusion(args, data, dist_util.dev(), ModelClass=ControlMDM, DiffusionClass=DiffusionClass) diffusion.mean = data.dataset.t2m_dataset.mean diffusion.std = data.dataset.t2m_dataset.std iterator = iter(data) input_motions, model_kwargs = next(iterator) input_motions = input_motions.to(dist_util.dev()) if args.text_condition != '': texts = [args.text_condition] * args.num_samples model_kwargs['y']['text'] = texts # add inpainting mask according to args control_joint = 'right_wrist' assert max_frames == input_motions.shape[-1] gt_frames_per_sample = {} n_joints = 22 if input_motions.shape[1] == 263 else 21 unnormalized_motion = data.dataset.t2m_dataset.inv_transform_torch(input_motions.permute(0, 2, 3, 1)).float() global_joints = recover_from_ric(unnormalized_motion, n_joints) global_joints = global_joints.view(-1, *global_joints.shape[2:]).permute(0, 2, 3, 1) global_joints.requires_grad = False model_kwargs['y']['global_joint'] = global_joints
model_kwargs['y']['global_joint_mask'] = torch.tensor(get_control_mask(args.inpainting_mask, global_joints.shape, joint = control_joint, ratio=args.mask_ratio, dataset = args.dataset)).float().to(dist_util.dev())
9
2023-11-27 05:28:02+00:00
16k
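Before recovering global joints, the script un-normalizes motions with the dataset statistics via inv_transform_torch; judging from the inv_transform shown in this record's context snippet, this is presumably data * std + mean. A hedged sketch of just that step on dummy shapes (recover_from_ric itself is repo-specific and not reproduced here):

# Illustrative sketch, not part of the record: inverse of (x - mean) / std.
import torch

bs, frames, feats = 4, 196, 263       # HumanML3D-style feature layout
motion_norm = torch.randn(bs, frames, feats)
mean = torch.zeros(feats)             # stand-ins for the dataset statistics
std = torch.ones(feats)

motion = motion_norm * std + mean     # broadcast over the feature dimension
print(motion.shape)                   # torch.Size([4, 196, 263])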
moonbow721/DPoser
run/train.py
[ { "identifier": "save_obj", "path": "lib/body_model/visual.py", "snippet": "def save_obj(v, f, file_name='output.obj'):\n obj_file = open(file_name, 'w')\n for i in range(len(v)):\n obj_file.write('v ' + str(v[i][0]) + ' ' + str(v[i][1]) + ' ' + str(v[i][2]) + '\\n')\n for i in range(len...
import os import pprint import traceback import cv2 import numpy as np import torch from pathlib import Path from absl import app from absl import flags from absl.flags import argparse_flags from ml_collections.config_flags import config_flags from torch.utils.data import DataLoader from lib.body_model.visual import save_obj, render_mesh from lib.utils.metric import average_pairwise_distance from lib.utils.misc import create_mask from tensorboardX import SummaryWriter from torch.utils.tensorboard import SummaryWriter from lib.utils.generic import create_logger from lib.algorithms.advanced.model import ScoreModelFC, TimeMLPs from lib.algorithms.advanced import losses, sde_lib, sampling, likelihood from lib.algorithms.ema import ExponentialMovingAverage from lib.dataset.AMASS import AMASSDataset, N_POSES from lib.utils.transforms import rot6d_to_axis_angle from lib.body_model.body_model import BodyModel from lib.dataset.AMASS import Evaler
11,146
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") bg_img = np.ones([512, 384, 3]) * 255 # background canvas focal = [1500, 1500] princpt = [200, 192] def parse_args(argv): parser = argparse_flags.ArgumentParser(description='train diffusion model') parser.add_argument('--dataset-folder', type=str, default='./data/AMASS/amass_processed', help='the folder includes necessary normalizing parameters') parser.add_argument('--version', type=str, default='version1', help='dataset version') parser.add_argument('--bodymodel-path', type=str, default='../body_models/smplx/SMPLX_NEUTRAL.npz', help='path of SMPLX model [for visual validation]') parser.add_argument('--restore-dir', type=str, help='resume training') parser.add_argument('--shape', type=bool, default=False, help='handle human shapes (have not been tested)') parser.add_argument('--sample', type=int, help='sample trainset to reduce data') parser.add_argument('--task', type=str, default=None, help='for validating') parser.add_argument('--name', type=str, default='', help='name of checkpoint folder') args = parser.parse_args(argv[1:]) return args def get_dataloader(root_path='', subset='train', version='', sample_interval=None, rot_rep='rot6d', return_shape=False, normalize=True, min_max=True): dataset = AMASSDataset(root_path=root_path, version=version, subset=subset, sample_interval=sample_interval, rot_rep=rot_rep, return_shape=return_shape, normalize=normalize, min_max=min_max) print('AMASS version: {}, rot_rep: {}, normalize: {}'.format(version, rot_rep, normalize)) # drop the last batch to ensure that body model can work all the time if subset == 'train': dataloader = DataLoader(dataset, batch_size=FLAGS.config.training.batch_size, shuffle=True, num_workers=4, pin_memory=False, drop_last=True) else: dataloader = DataLoader(dataset, batch_size=FLAGS.config.eval.batch_size, shuffle=False, num_workers=4, pin_memory=False, drop_last=True) return dataloader, dataset def main(args): def log_metrics(metrics, step, config, logger): log_freq = config.training.log_freq msg = f'Iter: [{step}/{num_train_steps}, {step / num_train_steps * 100:.2f}%][{idx}/{len(train_loader)}],\t' for key, value in metrics.items(): metrics[key] /= log_freq msg += f"{key}: {metrics[key]:.6f},\t" logger.info(msg) metrics = {key: 0.0 for key in metrics} return metrics def log_eval_metrics(metrics, step, writer): for key, value in metrics.items(): avg_value = np.mean(value).item() writer.add_scalar(f'eval_{key}', avg_value, step) metrics[key] = [] # Reset for the next evaluation # args = parse_args() config = FLAGS.config logger, final_output_dir, tb_log_dir = create_logger( config, 'train', folder_name=args.name) if config.training.render: obj_dir = Path(final_output_dir) / 'obj_results' render_dir = Path(final_output_dir) / 'render_results' if not obj_dir.exists(): print('=> creating {}'.format(obj_dir)) obj_dir.mkdir() if not render_dir.exists(): print('=> creating {}'.format(render_dir)) render_dir.mkdir() logger.info(pprint.pformat(config)) logger.info(pprint.pformat(args)) writer = SummaryWriter(tb_log_dir) ''' setup body model for val''' body_model_vis = BodyModel(bm_path=args.bodymodel_path, num_betas=10, batch_size=50, model_type='smplx').to(device) ''' setup datasets, dataloaders''' if args.sample: logger.info(f'sample trainset every {args.sample} frame') train_loader, train_dataset = get_dataloader(args.dataset_folder, 'train', args.version, args.sample, config.data.rot_rep, args.shape, config.data.normalize, config.data.min_max) test_loader, test_dataset = get_dataloader(args.dataset_folder, 'test', args.version, 100, config.data.rot_rep, args.shape, config.data.normalize, config.data.min_max) # always sample testset to save time denormalize_data = train_dataset.Denormalize if config.data.normalize else lambda x: x logger.info(f'total train samples: {len(train_dataset)}') logger.info(f'total test samples: {len(test_dataset)}') ''' setup score networks ''' POSE_DIM = 3 if config.data.rot_rep == 'axis' else 6 if config.model.type == 'ScoreModelFC': model = ScoreModelFC( config, n_poses=N_POSES, pose_dim=POSE_DIM, hidden_dim=config.model.HIDDEN_DIM, embed_dim=config.model.EMBED_DIM, n_blocks=config.model.N_BLOCKS, ) elif config.model.type == 'TimeMLPs':
try: except ImportError as e: try: except ImportError as e: print('Tensorboard is not Installed') FLAGS = flags.FLAGS config_flags.DEFINE_config_file( "config", None, "Training configuration.", lock_config=False) flags.mark_flags_as_required(["config"]) # global device device = torch.device("cuda" if torch.cuda.is_available() else "cpu") bg_img = np.ones([512, 384, 3]) * 255 # background canvas focal = [1500, 1500] princpt = [200, 192] def parse_args(argv): parser = argparse_flags.ArgumentParser(description='train diffusion model') parser.add_argument('--dataset-folder', type=str, default='./data/AMASS/amass_processed', help='the folder includes necessary normalizing parameters') parser.add_argument('--version', type=str, default='version1', help='dataset version') parser.add_argument('--bodymodel-path', type=str, default='../body_models/smplx/SMPLX_NEUTRAL.npz', help='path of SMPLX model [for visual validation]') parser.add_argument('--restore-dir', type=str, help='resume training') parser.add_argument('--shape', type=bool, default=False, help='handle human shapes (have not been tested)') parser.add_argument('--sample', type=int, help='sample trainset to reduce data') parser.add_argument('--task', type=str, default=None, help='for validating') parser.add_argument('--name', type=str, default='', help='name of checkpoint folder') args = parser.parse_args(argv[1:]) return args def get_dataloader(root_path='', subset='train', version='', sample_interval=None, rot_rep='rot6d', return_shape=False, normalize=True, min_max=True): dataset = AMASSDataset(root_path=root_path, version=version, subset=subset, sample_interval=sample_interval, rot_rep=rot_rep, return_shape=return_shape, normalize=normalize, min_max=min_max) print('AMASS version: {}, rot_rep: {}, normalize: {}'.format(version, rot_rep, normalize)) # drop the last batch to ensure that body model can work all the time if subset == 'train': dataloader = DataLoader(dataset, batch_size=FLAGS.config.training.batch_size, shuffle=True, num_workers=4, pin_memory=False, drop_last=True) else: dataloader = DataLoader(dataset, batch_size=FLAGS.config.eval.batch_size, shuffle=False, num_workers=4, pin_memory=False, drop_last=True) return dataloader, dataset def main(args): def log_metrics(metrics, step, config, logger): log_freq = config.training.log_freq msg = f'Iter: [{step}/{num_train_steps}, {step / num_train_steps * 100:.2f}%][{idx}/{len(train_loader)}],\t' for key, value in metrics.items(): metrics[key] /= log_freq msg += f"{key}: {metrics[key]:.6f},\t" logger.info(msg) metrics = {key: 0.0 for key in metrics} return metrics def log_eval_metrics(metrics, step, writer): for key, value in metrics.items(): avg_value = np.mean(value).item() writer.add_scalar(f'eval_{key}', avg_value, step) metrics[key] = [] # Reset for the next evaluation # args = parse_args() config = FLAGS.config logger, final_output_dir, tb_log_dir = create_logger( config, 'train', folder_name=args.name) if config.training.render: obj_dir = Path(final_output_dir) / 'obj_results' render_dir = Path(final_output_dir) / 'render_results' if not obj_dir.exists(): print('=> creating {}'.format(obj_dir)) obj_dir.mkdir() if not render_dir.exists(): print('=> creating {}'.format(render_dir)) render_dir.mkdir() logger.info(pprint.pformat(config)) logger.info(pprint.pformat(args)) writer = SummaryWriter(tb_log_dir) ''' setup body model for val''' body_model_vis = BodyModel(bm_path=args.bodymodel_path, num_betas=10, batch_size=50, model_type='smplx').to(device) ''' setup datasets, dataloaders''' if args.sample: logger.info(f'sample trainset every {args.sample} frame') train_loader, train_dataset = get_dataloader(args.dataset_folder, 'train', args.version, args.sample, config.data.rot_rep, args.shape, config.data.normalize, config.data.min_max) test_loader, test_dataset = get_dataloader(args.dataset_folder, 'test', args.version, 100, config.data.rot_rep, args.shape, config.data.normalize, config.data.min_max) # always sample testset to save time denormalize_data = train_dataset.Denormalize if config.data.normalize else lambda x: x logger.info(f'total train samples: {len(train_dataset)}') logger.info(f'total test samples: {len(test_dataset)}') ''' setup score networks ''' POSE_DIM = 3 if config.data.rot_rep == 'axis' else 6 if config.model.type == 'ScoreModelFC': model = ScoreModelFC( config, n_poses=N_POSES, pose_dim=POSE_DIM, hidden_dim=config.model.HIDDEN_DIM, embed_dim=config.model.EMBED_DIM, n_blocks=config.model.N_BLOCKS, ) elif config.model.type == 'TimeMLPs':
model = TimeMLPs(
6
2023-11-29 15:55:50+00:00
16k
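The record above ends by building a pose-denoising score network whose input width is N_POSES * POSE_DIM (3 values per joint for axis-angle, 6 for the rot6d representation), with hidden_dim, embed_dim and n_blocks taken from the config. As a rough illustration of the TimeMLPs-style architecture it selects between, here is a minimal time-conditioned MLP score model in PyTorch; the class name, default sizes and layer layout are hypothetical stand-ins, not the repo's ScoreModelFC or TimeMLPs implementations.

import torch
import torch.nn as nn

class ToyPoseScoreMLP(nn.Module):
    """Minimal sketch: predicts a score/noise vector for flattened joint rotations."""
    def __init__(self, n_poses=21, pose_dim=6, hidden_dim=512, embed_dim=128, n_blocks=2):
        super().__init__()
        in_dim = n_poses * pose_dim
        # simple learned embedding of the (continuous) diffusion time t
        self.time_embed = nn.Sequential(
            nn.Linear(1, embed_dim), nn.SiLU(), nn.Linear(embed_dim, embed_dim))
        layers = [nn.Linear(in_dim + embed_dim, hidden_dim), nn.SiLU()]
        for _ in range(n_blocks - 1):
            layers += [nn.Linear(hidden_dim, hidden_dim), nn.SiLU()]
        layers += [nn.Linear(hidden_dim, in_dim)]
        self.net = nn.Sequential(*layers)

    def forward(self, x, t):
        # x: [B, n_poses * pose_dim] flattened rotations, t: [B] in [0, 1]
        emb = self.time_embed(t[:, None])
        return self.net(torch.cat([x, emb], dim=-1))

model = ToyPoseScoreMLP()
out = model(torch.randn(4, 21 * 6), torch.rand(4))
print(out.shape)  # torch.Size([4, 126])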
KylinYee/R2-Talker-code
test.py
[ { "identifier": "NeRFDataset_Test", "path": "nerf/provider.py", "snippet": "class NeRFDataset_Test:\n def __init__(self, opt, device, downscale=1):\n super().__init__()\n \n self.opt = opt\n self.device = device\n self.downscale = downscale\n self.scale = opt...
import torch import argparse from nerf.provider import NeRFDataset_Test from nerf.gui import NeRFGUI from nerf.utils import * from nerf.network import NeRFNetwork, R2TalkerNeRF, GeneNeRFNetwork
10,823
parser.add_argument('--fix_eye', type=float, default=-1, help="fixed eye area, negative to disable, set to 0-0.3 for a reasonable eye") parser.add_argument('--smooth_eye', action='store_true', help="smooth the eye area sequence") parser.add_argument('--torso_shrink', type=float, default=0.8, help="shrink bg coords to allow more flexibility in deform") ### dataset options parser.add_argument('--color_space', type=str, default='srgb', help="Color space, supports (linear, srgb)") # parser.add_argument('--preload', action='store_true', help="preload all data into GPU, accelerate training but use more GPU memory") # (the default value is for the fox dataset) parser.add_argument('--bound', type=float, default=1, help="assume the scene is bounded in box[-bound, bound]^3, if > 1, will invoke adaptive ray marching.") parser.add_argument('--scale', type=float, default=4, help="scale camera location into box[-bound, bound]^3") parser.add_argument('--offset', type=float, nargs='*', default=[0, 0, 0], help="offset of camera location") parser.add_argument('--dt_gamma', type=float, default=1/256, help="dt_gamma (>=0) for adaptive ray marching. set to 0 to disable, >0 to accelerate rendering (but usually with worse quality)") parser.add_argument('--min_near', type=float, default=0.05, help="minimum near distance for camera") parser.add_argument('--density_thresh', type=float, default=10, help="threshold for density grid to be occupied (sigma)") parser.add_argument('--density_thresh_torso', type=float, default=0.01, help="threshold for density grid to be occupied (alpha)") parser.add_argument('--patch_size', type=int, default=1, help="[experimental] render patches in training, so as to apply LPIPS loss. 1 means disabled, use [64, 32, 16] to enable") parser.add_argument('--finetune_lips', action='store_true', help="use LPIPS and landmarks to fine tune lips region") parser.add_argument('--smooth_lips', action='store_true', help="smooth the enc_a in a exponential decay way...") parser.add_argument('--torso', action='store_true', help="fix head and train torso") parser.add_argument('--head_ckpt', type=str, default='', help="head model") ### GUI options parser.add_argument('--gui', action='store_true', help="start a GUI") parser.add_argument('--W', type=int, default=450, help="GUI width") parser.add_argument('--H', type=int, default=450, help="GUI height") parser.add_argument('--radius', type=float, default=3.35, help="default GUI camera radius from center") parser.add_argument('--fovy', type=float, default=21.24, help="default GUI camera fovy") parser.add_argument('--max_spp', type=int, default=1, help="GUI rendering max sample per pixel") ### else parser.add_argument('--att', type=int, default=2, help="audio attention mode (0 = turn off, 1 = left-direction, 2 = bi-direction)") parser.add_argument('--emb', action='store_true', help="use audio class + embedding instead of logits") parser.add_argument('--ind_dim', type=int, default=4, help="individual code dim, 0 to turn off") parser.add_argument('--ind_num', type=int, default=10000, help="number of individual codes, should be larger than training dataset size") parser.add_argument('--ind_dim_torso', type=int, default=8, help="individual code dim, 0 to turn off") parser.add_argument('--amb_dim', type=int, default=2, help="ambient dimension") parser.add_argument('--part', action='store_true', help="use partial training data (1/10)") parser.add_argument('--part2', action='store_true', help="use partial training data (first 15s)") 
parser.add_argument('--train_camera', action='store_true', help="optimize camera pose") parser.add_argument('--smooth_path', action='store_true', help="brute-force smooth camera pose trajectory with a window size") parser.add_argument('--smooth_path_window', type=int, default=7, help="smoothing window size") # asr parser.add_argument('--asr', action='store_true', help="load asr for real-time app") parser.add_argument('--asr_wav', type=str, default='', help="load the wav and use as input") parser.add_argument('--asr_play', action='store_true', help="play out the audio") parser.add_argument('--asr_model', type=str, default='cpierse/wav2vec2-large-xlsr-53-esperanto') # parser.add_argument('--asr_model', type=str, default='facebook/wav2vec2-large-960h-lv60-self') parser.add_argument('--asr_save_feats', action='store_true') # audio FPS parser.add_argument('--fps', type=int, default=50) # sliding window left-middle-right length (unit: 20ms) parser.add_argument('-l', type=int, default=10) parser.add_argument('-m', type=int, default=50) parser.add_argument('-r', type=int, default=10) opt = parser.parse_args() if opt.method == 'r2talker': opt.cond_type = 'idexp' elif opt.method == 'genefaceDagger': opt.cond_type = 'idexp' elif opt.method == 'rad-nerf': opt.cond_type = 'eo' # assert test mode opt.test = True opt.test_train = False # explicit smoothing opt.smooth_path = True opt.smooth_eye = True opt.smooth_lips = True assert opt.pose != '', 'Must provide a pose source' assert opt.aud != '', 'Must provide an audio source' if opt.O: opt.fp16 = True opt.exp_eye = True opt.cuda_ray = True # assert opt.cuda_ray, "Only support CUDA ray mode." print(opt) seed_everything(opt.seed) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if opt.method == 'r2talker': model = R2TalkerNeRF(opt) elif opt.method == 'genefaceDagger': model = GeneNeRFNetwork(opt) elif opt.method == 'rad-nerf': model = NeRFNetwork(opt) # print(model) trainer = Trainer('ngp', opt, model, device=device, workspace=opt.workspace, fp16=opt.fp16, metrics=[], use_checkpoint=opt.ckpt) test_loader = NeRFDataset_Test(opt, device=device).dataloader() # temp fix: for update_extra_states model.aud_features = test_loader._data.auds model.eye_areas = test_loader._data.eye_area if opt.gui: # we still need test_loader to provide audio features for testing.
# torch.autograd.set_detect_anomaly(True) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--pose', type=str, help="transforms.json, pose source") parser.add_argument('--aud', type=str, default=None, help="aud.npy, audio source") parser.add_argument('--cond_type', type=str, default=None, help="type of driving condition: eo, ds, idexp") parser.add_argument('--method', type=str, default='r2talker', help="r2talker, genefaceDagger, rad-nerf") parser.add_argument('--bg_img', type=str, default='white', help="bg.jpg, background image source") parser.add_argument('-O', action='store_true', help="equals --fp16 --cuda_ray --exp_eye") # parser.add_argument('--test', action='store_true', help="test mode (load model and test dataset)") # parser.add_argument('--test_train', action='store_true', help="test mode (load model and train dataset)") parser.add_argument('--data_range', type=int, nargs='*', default=[0, -1], help="data range to use") parser.add_argument('--workspace', type=str, default='workspace') parser.add_argument('--seed', type=int, default=0) ### training options # parser.add_argument('--iters', type=int, default=200000, help="training iters") # parser.add_argument('--lr', type=float, default=5e-3, help="initial learning rate") # parser.add_argument('--lr_net', type=float, default=5e-4, help="initial learning rate") parser.add_argument('--ckpt', type=str, default='latest') parser.add_argument('--num_rays', type=int, default=4096 * 16, help="num rays sampled per image for each training step") parser.add_argument('--cuda_ray', action='store_true', help="use CUDA raymarching instead of pytorch") parser.add_argument('--max_steps', type=int, default=16, help="max num steps sampled per ray (only valid when using --cuda_ray)") parser.add_argument('--num_steps', type=int, default=16, help="num steps sampled per ray (only valid when NOT using --cuda_ray)") parser.add_argument('--upsample_steps', type=int, default=0, help="num steps up-sampled per ray (only valid when NOT using --cuda_ray)") parser.add_argument('--update_extra_interval', type=int, default=16, help="iter interval to update extra status (only valid when using --cuda_ray)") parser.add_argument('--max_ray_batch', type=int, default=4096, help="batch size of rays at inference to avoid OOM (only valid when NOT using --cuda_ray)") ### network backbone options parser.add_argument('--fp16', action='store_true', help="use amp mixed precision training") parser.add_argument('--lambda_amb', type=float, default=0.1, help="lambda for ambient loss") parser.add_argument('--fbg', action='store_true', help="frame-wise bg") parser.add_argument('--exp_eye', action='store_true', help="explicitly control the eyes") parser.add_argument('--fix_eye', type=float, default=-1, help="fixed eye area, negative to disable, set to 0-0.3 for a reasonable eye") parser.add_argument('--smooth_eye', action='store_true', help="smooth the eye area sequence") parser.add_argument('--torso_shrink', type=float, default=0.8, help="shrink bg coords to allow more flexibility in deform") ### dataset options parser.add_argument('--color_space', type=str, default='srgb', help="Color space, supports (linear, srgb)") # parser.add_argument('--preload', action='store_true', help="preload all data into GPU, accelerate training but use more GPU memory") # (the default value is for the fox dataset) parser.add_argument('--bound', type=float, default=1, help="assume the scene is bounded in box[-bound, bound]^3, if > 1, will invoke adaptive ray marching.") 
parser.add_argument('--scale', type=float, default=4, help="scale camera location into box[-bound, bound]^3") parser.add_argument('--offset', type=float, nargs='*', default=[0, 0, 0], help="offset of camera location") parser.add_argument('--dt_gamma', type=float, default=1/256, help="dt_gamma (>=0) for adaptive ray marching. set to 0 to disable, >0 to accelerate rendering (but usually with worse quality)") parser.add_argument('--min_near', type=float, default=0.05, help="minimum near distance for camera") parser.add_argument('--density_thresh', type=float, default=10, help="threshold for density grid to be occupied (sigma)") parser.add_argument('--density_thresh_torso', type=float, default=0.01, help="threshold for density grid to be occupied (alpha)") parser.add_argument('--patch_size', type=int, default=1, help="[experimental] render patches in training, so as to apply LPIPS loss. 1 means disabled, use [64, 32, 16] to enable") parser.add_argument('--finetune_lips', action='store_true', help="use LPIPS and landmarks to fine tune lips region") parser.add_argument('--smooth_lips', action='store_true', help="smooth the enc_a in a exponential decay way...") parser.add_argument('--torso', action='store_true', help="fix head and train torso") parser.add_argument('--head_ckpt', type=str, default='', help="head model") ### GUI options parser.add_argument('--gui', action='store_true', help="start a GUI") parser.add_argument('--W', type=int, default=450, help="GUI width") parser.add_argument('--H', type=int, default=450, help="GUI height") parser.add_argument('--radius', type=float, default=3.35, help="default GUI camera radius from center") parser.add_argument('--fovy', type=float, default=21.24, help="default GUI camera fovy") parser.add_argument('--max_spp', type=int, default=1, help="GUI rendering max sample per pixel") ### else parser.add_argument('--att', type=int, default=2, help="audio attention mode (0 = turn off, 1 = left-direction, 2 = bi-direction)") parser.add_argument('--emb', action='store_true', help="use audio class + embedding instead of logits") parser.add_argument('--ind_dim', type=int, default=4, help="individual code dim, 0 to turn off") parser.add_argument('--ind_num', type=int, default=10000, help="number of individual codes, should be larger than training dataset size") parser.add_argument('--ind_dim_torso', type=int, default=8, help="individual code dim, 0 to turn off") parser.add_argument('--amb_dim', type=int, default=2, help="ambient dimension") parser.add_argument('--part', action='store_true', help="use partial training data (1/10)") parser.add_argument('--part2', action='store_true', help="use partial training data (first 15s)") parser.add_argument('--train_camera', action='store_true', help="optimize camera pose") parser.add_argument('--smooth_path', action='store_true', help="brute-force smooth camera pose trajectory with a window size") parser.add_argument('--smooth_path_window', type=int, default=7, help="smoothing window size") # asr parser.add_argument('--asr', action='store_true', help="load asr for real-time app") parser.add_argument('--asr_wav', type=str, default='', help="load the wav and use as input") parser.add_argument('--asr_play', action='store_true', help="play out the audio") parser.add_argument('--asr_model', type=str, default='cpierse/wav2vec2-large-xlsr-53-esperanto') # parser.add_argument('--asr_model', type=str, default='facebook/wav2vec2-large-960h-lv60-self') parser.add_argument('--asr_save_feats', action='store_true') # audio FPS 
parser.add_argument('--fps', type=int, default=50) # sliding window left-middle-right length (unit: 20ms) parser.add_argument('-l', type=int, default=10) parser.add_argument('-m', type=int, default=50) parser.add_argument('-r', type=int, default=10) opt = parser.parse_args() if opt.method == 'r2talker': opt.cond_type = 'idexp' elif opt.method == 'genefaceDagger': opt.cond_type = 'idexp' elif opt.method == 'rad-nerf': opt.cond_type = 'eo' # assert test mode opt.test = True opt.test_train = False # explicit smoothing opt.smooth_path = True opt.smooth_eye = True opt.smooth_lips = True assert opt.pose != '', 'Must provide a pose source' assert opt.aud != '', 'Must provide an audio source' if opt.O: opt.fp16 = True opt.exp_eye = True opt.cuda_ray = True # assert opt.cuda_ray, "Only support CUDA ray mode." print(opt) seed_everything(opt.seed) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if opt.method == 'r2talker': model = R2TalkerNeRF(opt) elif opt.method == 'genefaceDagger': model = GeneNeRFNetwork(opt) elif opt.method == 'rad-nerf': model = NeRFNetwork(opt) # print(model) trainer = Trainer('ngp', opt, model, device=device, workspace=opt.workspace, fp16=opt.fp16, metrics=[], use_checkpoint=opt.ckpt) test_loader = NeRFDataset_Test(opt, device=device).dataloader() # temp fix: for update_extra_states model.aud_features = test_loader._data.auds model.eye_areas = test_loader._data.eye_area if opt.gui: # we still need test_loader to provide audio features for testing.
with NeRFGUI(opt, trainer, test_loader) as gui:
1
2023-12-04 12:51:59+00:00
16k
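The test script in the record above force-enables smooth_path, smooth_eye and smooth_lips, and its help strings describe the pose smoothing as "brute-force" over a sliding window (default size 7 via --smooth_path_window). A minimal sketch of that kind of per-frame window smoothing, assuming a plain moving average rather than whatever weighting the repo actually applies:

import numpy as np

def smooth_sequence(values, window=7):
    """Brute-force sliding-window average over frames; edges use a shrunken window."""
    values = np.asarray(values, dtype=np.float64)
    half = window // 2
    out = np.empty_like(values)
    for i in range(len(values)):
        lo, hi = max(0, i - half), min(len(values), i + half + 1)
        out[i] = values[lo:hi].mean(axis=0)
    return out

eye_area = np.random.rand(100)  # e.g. a per-frame eye-openness scalar
print(smooth_sequence(eye_area, window=7)[:5])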
ubc-vision/vivid123
vivid123/generation_utils.py
[ { "identifier": "CLIPCameraProjection", "path": "vivid123/models/clip_camera_projection.py", "snippet": "class CLIPCameraProjection(ModelMixin, ConfigMixin):\n \"\"\"\n A Projection layer for CLIP embedding and camera embedding.\n Parameters:\n embedding_dim (`int`, *optional*, defaults ...
import os import yaml import re import torch import numpy as np import imageio.v3 as imageio from typing import List, Any from yaml.parser import ParserError from PIL import Image from diffusers.pipelines import DiffusionPipeline from diffusers.models import UNet2DConditionModel, AutoencoderKL from diffusers.schedulers import DPMSolverMultistepScheduler, EulerDiscreteScheduler from transformers import CLIPVisionModelWithProjection from .models import CLIPCameraProjection from .pipelines import ViVid123Pipeline from .configs import ViVid123BaseSchema
11,310
assert ( video_linear_start_weight >= 0.0 and video_linear_start_weight <= 1.0 ), "video_linear_start_weight must be between 0.0 and 1.0" assert ( video_linear_end_weight >= 0.0 and video_linear_end_weight <= 1.0 ), "video_linear_end_weight must be between 0.0 and 1.0" assert ( video_start_step_percentage >= 0.0 and video_start_step_percentage <= 1.0 ), "video_start_step_percentage must be between 0.0 and 1.0" assert ( video_end_step_percentage >= 0.0 and video_end_step_percentage <= 1.0 ), "video_end_step_percentage must be between 0.0 and 1.0" assert ( zero123_linear_start_weight >= 0.0 and zero123_linear_start_weight <= 1.0 ), "zero123_linear_start_weight must be between 0.0 and 1.0" assert ( zero123_linear_end_weight >= 0.0 and zero123_linear_end_weight <= 1.0 ), "zero123_linear_end_weight must be between 0.0 and 1.0" assert ( zero123_start_step_percentage >= 0.0 and zero123_start_step_percentage <= 1.0 ), "zero123_start_step_percentage must be between 0.0 and 1.0" assert ( zero123_end_step_percentage >= 0.0 and zero123_end_step_percentage <= 1.0 ), "zero123_end_step_percentage must be between 0.0 and 1.0" video_schedule = torch.linspace( start=video_linear_start_weight, end=video_linear_end_weight, steps=int((video_end_step_percentage - video_start_step_percentage) * num_inference_steps), ) zero123_schedule = torch.linspace( start=zero123_linear_start_weight, end=zero123_linear_end_weight, steps=int((zero123_end_step_percentage - zero123_start_step_percentage) * num_inference_steps), ) if video_schedule.shape[0] < num_inference_steps: video_schedule = torch.cat( [ video_linear_start_weight * torch.ones([video_start_step_percentage * num_inference_steps]), video_schedule, video_linear_end_weight * torch.ones([num_inference_steps - video_end_step_percentage * num_inference_steps]), ] ) if zero123_schedule.shape[0] < num_inference_steps: zero123_schedule = torch.cat( [ zero123_linear_start_weight * torch.ones([zero123_start_step_percentage * num_inference_steps]), zero123_schedule, zero123_linear_end_weight * torch.ones([num_inference_steps - zero123_end_step_percentage * num_inference_steps]), ] ) return (video_schedule, zero123_schedule) def save_videos_grid_zeroscope_nplist(video_frames: List[np.ndarray], path: str, n_rows=6, fps=8, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]): # fourcc = cv2.VideoWriter_fourcc(*"mp4v") f = len(video_frames) h, w, c = video_frames[0].shape #images = [(image).astype("uint8") for image in video_frames] os.makedirs(os.path.dirname(path), exist_ok=True) imageio.imwrite(path, video_frames, fps=fps) def prepare_pipelines( ZERO123_MODEL_ID: str = "bennyguo/zero123-xl-diffusers", VIDEO_MODEL_ID: str = "cerspense/zeroscope_v2_576w", VIDEO_XL_MODEL_ID: str = "cerspense/zeroscope_v2_XL" ): zero123_unet = UNet2DConditionModel.from_pretrained(ZERO123_MODEL_ID, subfolder="unet") zero123_cam_proj = CLIPCameraProjection.from_pretrained(ZERO123_MODEL_ID, subfolder="clip_camera_projection") zero123_img_enc = CLIPVisionModelWithProjection.from_pretrained(ZERO123_MODEL_ID, subfolder="image_encoder") vivid123_pipe = ViVid123Pipeline.from_pretrained( VIDEO_MODEL_ID, # torch_dtype=torch.float16, novel_view_unet=zero123_unet, image_encoder=zero123_img_enc, cc_projection=zero123_cam_proj, ) vivid123_pipe.scheduler = DPMSolverMultistepScheduler.from_config(vivid123_pipe.scheduler.config) # vivid123_pipe.to("cuda") vivid123_pipe.enable_model_cpu_offload() xl_pipe = DiffusionPipeline.from_pretrained(VIDEO_XL_MODEL_ID, torch_dtype=torch.float16) xl_pipe.scheduler = 
DPMSolverMultistepScheduler.from_config(xl_pipe.scheduler.config) # xl_pipe.to("cuda") xl_pipe.enable_model_cpu_offload() return vivid123_pipe, xl_pipe def generation_vivid123( vivid123_pipe: ViVid123Pipeline, xl_pipe: DiffusionPipeline, config_path: str, output_root_dir: str, ): # loading yaml config _var_matcher = re.compile(r"\${([^}^{]+)}") _tag_matcher = re.compile(r"[^$]*\${([^}^{]+)}.*") def _path_constructor(_loader: Any, node: Any): def replace_fn(match): envparts = f"{match.group(1)}:".split(":") return os.environ.get(envparts[0], envparts[1]) return _var_matcher.sub(replace_fn, node.value) def load_yaml(filename: str) -> dict: yaml.add_implicit_resolver("!envvar", _tag_matcher, None, yaml.SafeLoader) yaml.add_constructor("!envvar", _path_constructor, yaml.SafeLoader) try: with open(filename, "r") as f: return yaml.safe_load(f.read()) except (FileNotFoundError, PermissionError, ParserError): return dict() yaml_loaded = load_yaml(config_path)
def prepare_cam_pose_input( num_frames: int = 25, delta_elevation_start: float = 0.0, delta_elevation_end: float = 0.0, delta_azimuth_start: float = -45.0, delta_azimuth_end: float = 45.0, delta_radius_start: float = 0.0, delta_radius_end: float = 0.0, ): r""" The function to prepare the input to the vivid123 pipeline Args: delta_elevation_start (`float`, *optional*, defaults to 0.0): The starting relative elevation angle of the camera, in degree. Relative to the elevation of the reference image. The camera is facing towards the origin. delta_elevation_end (`float`, *optional*, defaults to 0.0): The ending relative elevation angle of the camera, in degree. Relative to the elevation of the reference image. The camera is facing towards the origin. delta_azimuth_start (`float`, *optional*, defaults to -45.0): The starting relative azimuth angle of the camera, in degree. Relative to the elevation of the reference image. The camera is facing towards the origin. delta_azimuth_end (`float`, *optional*, defaults to 45.0): The ending relative azimuth angle of the camera, in degree. Relative to the elevation of the reference image. The camera is facing towards the origin. Returns: """ cam_elevation = np.radians(np.linspace(delta_elevation_start, delta_elevation_end, num_frames))[..., None] cam_azimuth = np.radians(np.linspace(delta_azimuth_start, delta_azimuth_end, num_frames)) cam_azimuth_sin_cos = np.stack([np.sin(cam_azimuth), np.cos(cam_azimuth)], axis=-1) cam_radius = np.linspace(delta_radius_start, delta_radius_end, num_frames)[..., None] cam_pose_np = np.concatenate([cam_elevation, cam_azimuth_sin_cos, cam_radius], axis=-1) cam_pose_torch = torch.from_numpy(cam_pose_np) return cam_pose_torch # refer to https://stackoverflow.com/a/33507138/6257375 def conver_rgba_to_rgb_white_bg( image: Image, H: int = 256, W: int = 256, ): input_image = image.convert("RGBA").resize((H, W), Image.BICUBIC) background = Image.new("RGBA", input_image.size, (255, 255, 255)) alpha_composite = Image.alpha_composite(background, input_image) return alpha_composite def prepare_fusion_schedule_linear( num_inference_steps: int = 50, video_linear_start_weight: float = 1.0, video_linear_end_weight: float = 0.5, video_start_step_percentage: float = 0.0, video_end_step_percentage: float = 1.0, zero123_linear_start_weight: float = 1.0, zero123_linear_end_weight: float = 1.0, zero123_start_step_percentage: float = 0.0, zero123_end_step_percentage: float = 1.0, ): """ Prepare the fusion schedule of video diffusion and zero123 at all the denoising steps Args: video_linear_start_weight (`float`, *optional*, defaults to 1.0): The weight of the video diffusion at the start of the video. The weight is linearly increased from `video_linear_start_weight` to `video_linear_end_weight` during the video diffusion. video_linear_end_weight (`float`, *optional*, defaults to 0.5): The weight of the video diffusion at the end of the video. The weight is linearly increased from `video_linear_start_weight` to `video_linear_end_weight` during the video diffusion. video_start_step_percentage (`float`, *optional*, defaults to 0.0): The percentage of the total number of inference steps at which the video diffusion starts. The video diffusion is linearly increased from `video_linear_start_weight` to `video_linear_end_weight` between `video_start_step_percentage` and `video_end_step_percentage`. video_end_step_percentage (`float`, *optional*, defaults to 1.0): The percentage of the total number of inference steps at which the video diffusion ends. 
The video diffusion is linearly increased from `video_linear_start_weight` to `video_linear_end_weight` between `video_start_step_percentage` and `video_end_step_percentage`. zero123_linear_start_weight (`float`, *optional*, defaults to 1.0): The weight of the zero123 diffusion at the start of the video. The weight is linearly increased from `zero123_linear_start_weight` to `zero123_linear_end_weight` during the zero123 diffusion. zero123_linear_end_weight (`float`, *optional*, defaults to 1.0): The weight of the zero123 diffusion at the end of the video. The weight is linearly increased from `zero123_linear_start_weight` to `zero123_linear_end_weight` during the zero123 diffusion. zero123_start_step_percentage (`float`, *optional*, defaults to 0.0): The percentage of the total number of inference steps at which the zero123 diffusion starts. The zero123 diffusion is linearly increased from `zero123_linear_start_weight` to `zero123_linear_end_weight` between `zero123_start_step_percentage` and `zero123_end_step_percentage`. zero123_end_step_percentage (`float`, *optional*, defaults to 1.0): The percentage of the total number of inference steps at which the zero123 diffusion ends. The zero123 diffusion is linearly increased from `zero123_linear_start_weight` to `zero123_linear_end_weight` between `zero123_start_step_percentage` and `zero123_end_step_percentage`. Return: A tuple of two tensors, video_schedule (`torch.Tensor`): The schedule of the video diffusion weighting, with shape `[num_inference_steps]`. zero123_schedule (`torch.Tensor`): The schedule of the zero123 diffusion weighting, with shape `[num_inference_steps]`. """ assert ( video_linear_start_weight >= 0.0 and video_linear_start_weight <= 1.0 ), "video_linear_start_weight must be between 0.0 and 1.0" assert ( video_linear_end_weight >= 0.0 and video_linear_end_weight <= 1.0 ), "video_linear_end_weight must be between 0.0 and 1.0" assert ( video_start_step_percentage >= 0.0 and video_start_step_percentage <= 1.0 ), "video_start_step_percentage must be between 0.0 and 1.0" assert ( video_end_step_percentage >= 0.0 and video_end_step_percentage <= 1.0 ), "video_end_step_percentage must be between 0.0 and 1.0" assert ( zero123_linear_start_weight >= 0.0 and zero123_linear_start_weight <= 1.0 ), "zero123_linear_start_weight must be between 0.0 and 1.0" assert ( zero123_linear_end_weight >= 0.0 and zero123_linear_end_weight <= 1.0 ), "zero123_linear_end_weight must be between 0.0 and 1.0" assert ( zero123_start_step_percentage >= 0.0 and zero123_start_step_percentage <= 1.0 ), "zero123_start_step_percentage must be between 0.0 and 1.0" assert ( zero123_end_step_percentage >= 0.0 and zero123_end_step_percentage <= 1.0 ), "zero123_end_step_percentage must be between 0.0 and 1.0" video_schedule = torch.linspace( start=video_linear_start_weight, end=video_linear_end_weight, steps=int((video_end_step_percentage - video_start_step_percentage) * num_inference_steps), ) zero123_schedule = torch.linspace( start=zero123_linear_start_weight, end=zero123_linear_end_weight, steps=int((zero123_end_step_percentage - zero123_start_step_percentage) * num_inference_steps), ) if video_schedule.shape[0] < num_inference_steps: video_schedule = torch.cat( [ video_linear_start_weight * torch.ones([video_start_step_percentage * num_inference_steps]), video_schedule, video_linear_end_weight * torch.ones([num_inference_steps - video_end_step_percentage * num_inference_steps]), ] ) if zero123_schedule.shape[0] < num_inference_steps: zero123_schedule = 
torch.cat( [ zero123_linear_start_weight * torch.ones([zero123_start_step_percentage * num_inference_steps]), zero123_schedule, zero123_linear_end_weight * torch.ones([num_inference_steps - zero123_end_step_percentage * num_inference_steps]), ] ) return (video_schedule, zero123_schedule) def save_videos_grid_zeroscope_nplist(video_frames: List[np.ndarray], path: str, n_rows=6, fps=8, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]): # fourcc = cv2.VideoWriter_fourcc(*"mp4v") f = len(video_frames) h, w, c = video_frames[0].shape #images = [(image).astype("uint8") for image in video_frames] os.makedirs(os.path.dirname(path), exist_ok=True) imageio.imwrite(path, video_frames, fps=fps) def prepare_pipelines( ZERO123_MODEL_ID: str = "bennyguo/zero123-xl-diffusers", VIDEO_MODEL_ID: str = "cerspense/zeroscope_v2_576w", VIDEO_XL_MODEL_ID: str = "cerspense/zeroscope_v2_XL" ): zero123_unet = UNet2DConditionModel.from_pretrained(ZERO123_MODEL_ID, subfolder="unet") zero123_cam_proj = CLIPCameraProjection.from_pretrained(ZERO123_MODEL_ID, subfolder="clip_camera_projection") zero123_img_enc = CLIPVisionModelWithProjection.from_pretrained(ZERO123_MODEL_ID, subfolder="image_encoder") vivid123_pipe = ViVid123Pipeline.from_pretrained( VIDEO_MODEL_ID, # torch_dtype=torch.float16, novel_view_unet=zero123_unet, image_encoder=zero123_img_enc, cc_projection=zero123_cam_proj, ) vivid123_pipe.scheduler = DPMSolverMultistepScheduler.from_config(vivid123_pipe.scheduler.config) # vivid123_pipe.to("cuda") vivid123_pipe.enable_model_cpu_offload() xl_pipe = DiffusionPipeline.from_pretrained(VIDEO_XL_MODEL_ID, torch_dtype=torch.float16) xl_pipe.scheduler = DPMSolverMultistepScheduler.from_config(xl_pipe.scheduler.config) # xl_pipe.to("cuda") xl_pipe.enable_model_cpu_offload() return vivid123_pipe, xl_pipe def generation_vivid123( vivid123_pipe: ViVid123Pipeline, xl_pipe: DiffusionPipeline, config_path: str, output_root_dir: str, ): # loading yaml config _var_matcher = re.compile(r"\${([^}^{]+)}") _tag_matcher = re.compile(r"[^$]*\${([^}^{]+)}.*") def _path_constructor(_loader: Any, node: Any): def replace_fn(match): envparts = f"{match.group(1)}:".split(":") return os.environ.get(envparts[0], envparts[1]) return _var_matcher.sub(replace_fn, node.value) def load_yaml(filename: str) -> dict: yaml.add_implicit_resolver("!envvar", _tag_matcher, None, yaml.SafeLoader) yaml.add_constructor("!envvar", _path_constructor, yaml.SafeLoader) try: with open(filename, "r") as f: return yaml.safe_load(f.read()) except (FileNotFoundError, PermissionError, ParserError): return dict() yaml_loaded = load_yaml(config_path)
cfg = ViVid123BaseSchema.model_validate(yaml_loaded)
2
2023-11-27 22:48:17+00:00
16k
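With the default arguments shown in prepare_fusion_schedule_linear above, both padding branches are skipped (each linspace already covers all num_inference_steps), so the schedules are plain linear ramps over the denoising steps. A small standalone check of that default behaviour, re-computed here rather than imported since the module path is repo-specific:

import torch

num_inference_steps = 50
# defaults from the record: video weight ramps 1.0 -> 0.5, zero123 stays at 1.0
video_schedule = torch.linspace(1.0, 0.5, steps=num_inference_steps)
zero123_schedule = torch.linspace(1.0, 1.0, steps=num_inference_steps)

print(video_schedule[0].item(), video_schedule[-1].item())  # 1.0 0.5
print(zero123_schedule.unique())                            # tensor([1.])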
TISUnion/PrimeBackup
prime_backup/action/import_backup_action.py
[ { "identifier": "CreateBackupActionBase", "path": "prime_backup/action/create_backup_action_base.py", "snippet": "class CreateBackupActionBase(Action[BackupInfo], ABC):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.__new_blobs: List[BlobInfo] = []\n\t\tself.__new_blobs_summary: Optional[BlobL...
import contextlib import functools import json import os import shutil import stat import tarfile import threading import time import zipfile from abc import ABC, abstractmethod from pathlib import Path from typing import ContextManager, IO, Optional, NamedTuple, List, Dict, Tuple from prime_backup.action.create_backup_action_base import CreateBackupActionBase from prime_backup.compressors import Compressor, CompressMethod from prime_backup.config.config import Config from prime_backup.constants import BACKUP_META_FILE_NAME from prime_backup.db import schema from prime_backup.db.access import DbAccess from prime_backup.db.session import DbSession from prime_backup.exceptions import PrimeBackupError from prime_backup.types.backup_info import BackupInfo from prime_backup.types.backup_meta import BackupMeta from prime_backup.types.standalone_backup_format import StandaloneBackupFormat from prime_backup.types.tar_format import TarFormat from prime_backup.types.units import ByteCount from prime_backup.utils import hash_utils, blob_utils, misc_utils from prime_backup.utils.hash_utils import SizeAndHash
10,818
@contextlib.contextmanager def open(self) -> ContextManager[IO[bytes]]: yield self.tar.extractfile(self.member) class TarFileHolder(PackedBackupFileHandler.FileHolder): def __init__(self, tar: tarfile.TarFile): self.tar = tar def get_member(self, path: str) -> Optional['TarBackupHandler.TarMember']: try: member = self.tar.getmember(path) except KeyError: return None else: return TarBackupHandler.TarMember(self.tar, member) def list_member(self) -> List['TarBackupHandler.TarMember']: return [TarBackupHandler.TarMember(self.tar, member) for member in self.tar.getmembers()] def __init__(self, tar_format: TarFormat): self.tar_format = tar_format @contextlib.contextmanager def open_file(self, path: Path) -> ContextManager[TarFileHolder]: compress_method = self.tar_format.value.compress_method if compress_method == CompressMethod.plain: with tarfile.open(path, mode=self.tar_format.value.mode_r) as tar: yield self.TarFileHolder(tar) else: # zstd stream does not support seek operation, sowe need to extract the tar into a temp path first, # then operate on it. requires extra spaces tho temp_file = Config.get().temp_path / 'import_{}_{}.tmp'.format(os.getpid(), threading.current_thread().ident) temp_file.parent.mkdir(parents=True, exist_ok=True) with contextlib.ExitStack() as exit_stack: exit_stack.callback(functools.partial(temp_file.unlink, missing_ok=True)) Compressor.create(compress_method).copy_decompressed(path, temp_file) with tarfile.open(temp_file, mode=self.tar_format.value.mode_r) as tar: yield self.TarFileHolder(tar) class ZipBackupHandler(PackedBackupFileHandler): class ZipMember(PackedBackupFileHandler.Member): def __init__(self, zipf: zipfile.ZipFile, member: zipfile.ZipInfo): self.zipf = zipf self.member = member mode = (self.member.external_attr >> 16) & 0xFFFF if mode == 0: if self.path.endswith('/'): mode = stat.S_IFDIR | 0o755 else: mode = stat.S_IFREG | 0o644 self.__mode = mode @property def mode(self) -> int: return self.__mode @property def path(self) -> str: return self.member.filename @property def uid(self) -> Optional[int]: return None @property def gid(self) -> Optional[int]: return None @property def mtime_ns(self) -> int: return int(time.mktime(self.member.date_time + (0, 0, -1)) * 1e9) def is_file(self) -> bool: return not self.is_dir() and stat.S_ISREG(self.mode) def is_dir(self) -> bool: return self.member.is_dir() def is_link(self) -> bool: return not self.is_dir() and stat.S_ISLNK(self.mode) def read_link(self) -> str: max_link_size = 10240 with self.open() as f: buf = f.read(max_link_size) if len(buf) == max_link_size: raise ValueError('symlink too large, read {} bytes, peek: {}'.format(len(buf), buf[:20])) return buf.decode('utf8') @contextlib.contextmanager def open(self) -> ContextManager[IO[bytes]]: with self.zipf.open(self.member, 'r') as f: yield f class ZipFileHolder(PackedBackupFileHandler.FileHolder): def __init__(self, zipf: zipfile.ZipFile): self.zipf = zipf def get_member(self, path: str) -> Optional['ZipBackupHandler.ZipMember']: try: member = self.zipf.getinfo(path) except KeyError: return None else: return ZipBackupHandler.ZipMember(self.zipf, member) def list_member(self) -> List['ZipBackupHandler.ZipMember']: return [ZipBackupHandler.ZipMember(self.zipf, member) for member in self.zipf.infolist()] @contextlib.contextmanager def open_file(self, path: Path) -> ContextManager[ZipFileHolder]: with zipfile.ZipFile(path, 'r') as f: yield self.ZipFileHolder(f) class ImportBackupAction(CreateBackupActionBase):
class UnsupportedFormat(PrimeBackupError): pass class BackupMetadataNotFound(PrimeBackupError): pass class _FileDescription(NamedTuple): blob: Optional[schema.Blob] hash: str size: int class PackedBackupFileHandler(ABC): class Member(ABC): @property @abstractmethod def mode(self) -> int: ... @property @abstractmethod def path(self) -> str: ... @property @abstractmethod def uid(self) -> Optional[int]: ... @property @abstractmethod def gid(self) -> Optional[int]: ... @property @abstractmethod def mtime_ns(self) -> int: ... @abstractmethod def is_file(self) -> bool: ... @abstractmethod def is_dir(self) -> bool: ... @abstractmethod def is_link(self) -> bool: ... @abstractmethod def open(self) -> ContextManager[IO[bytes]]: ... @abstractmethod def read_link(self) -> str: ... class FileHolder(ABC): @abstractmethod def get_member(self, path: str) -> Optional['PackedBackupFileHandler.Member']: ... @abstractmethod def list_member(self) -> List['PackedBackupFileHandler.Member']: ... @abstractmethod def open_file(self, path: Path) -> ContextManager[FileHolder]: ... class TarBackupHandler(PackedBackupFileHandler): class TarMember(PackedBackupFileHandler.Member): def __init__(self, tar: tarfile.TarFile, member: tarfile.TarInfo): self.tar = tar self.member = member @property def mode(self) -> int: mode = self.member.mode & 0xFFFF if self.member.isfile(): mode |= stat.S_IFREG elif self.member.isdir(): mode |= stat.S_IFDIR elif self.member.issym(): mode |= stat.S_IFLNK else: raise NotImplementedError('not implemented for type {}'.format(self.member.type)) return mode @property def path(self) -> str: return self.member.path @property def uid(self) -> int: return self.member.uid @property def gid(self) -> int: return self.member.gid @property def mtime_ns(self) -> int: return self.member.mtime * 10 ** 9 def is_file(self) -> bool: return self.member.isfile() def is_dir(self) -> bool: return self.member.isdir() def is_link(self) -> bool: return self.member.issym() def read_link(self) -> str: return self.member.linkpath @contextlib.contextmanager def open(self) -> ContextManager[IO[bytes]]: yield self.tar.extractfile(self.member) class TarFileHolder(PackedBackupFileHandler.FileHolder): def __init__(self, tar: tarfile.TarFile): self.tar = tar def get_member(self, path: str) -> Optional['TarBackupHandler.TarMember']: try: member = self.tar.getmember(path) except KeyError: return None else: return TarBackupHandler.TarMember(self.tar, member) def list_member(self) -> List['TarBackupHandler.TarMember']: return [TarBackupHandler.TarMember(self.tar, member) for member in self.tar.getmembers()] def __init__(self, tar_format: TarFormat): self.tar_format = tar_format @contextlib.contextmanager def open_file(self, path: Path) -> ContextManager[TarFileHolder]: compress_method = self.tar_format.value.compress_method if compress_method == CompressMethod.plain: with tarfile.open(path, mode=self.tar_format.value.mode_r) as tar: yield self.TarFileHolder(tar) else: # zstd stream does not support seek operation, sowe need to extract the tar into a temp path first, # then operate on it. 
requires extra spaces tho temp_file = Config.get().temp_path / 'import_{}_{}.tmp'.format(os.getpid(), threading.current_thread().ident) temp_file.parent.mkdir(parents=True, exist_ok=True) with contextlib.ExitStack() as exit_stack: exit_stack.callback(functools.partial(temp_file.unlink, missing_ok=True)) Compressor.create(compress_method).copy_decompressed(path, temp_file) with tarfile.open(temp_file, mode=self.tar_format.value.mode_r) as tar: yield self.TarFileHolder(tar) class ZipBackupHandler(PackedBackupFileHandler): class ZipMember(PackedBackupFileHandler.Member): def __init__(self, zipf: zipfile.ZipFile, member: zipfile.ZipInfo): self.zipf = zipf self.member = member mode = (self.member.external_attr >> 16) & 0xFFFF if mode == 0: if self.path.endswith('/'): mode = stat.S_IFDIR | 0o755 else: mode = stat.S_IFREG | 0o644 self.__mode = mode @property def mode(self) -> int: return self.__mode @property def path(self) -> str: return self.member.filename @property def uid(self) -> Optional[int]: return None @property def gid(self) -> Optional[int]: return None @property def mtime_ns(self) -> int: return int(time.mktime(self.member.date_time + (0, 0, -1)) * 1e9) def is_file(self) -> bool: return not self.is_dir() and stat.S_ISREG(self.mode) def is_dir(self) -> bool: return self.member.is_dir() def is_link(self) -> bool: return not self.is_dir() and stat.S_ISLNK(self.mode) def read_link(self) -> str: max_link_size = 10240 with self.open() as f: buf = f.read(max_link_size) if len(buf) == max_link_size: raise ValueError('symlink too large, read {} bytes, peek: {}'.format(len(buf), buf[:20])) return buf.decode('utf8') @contextlib.contextmanager def open(self) -> ContextManager[IO[bytes]]: with self.zipf.open(self.member, 'r') as f: yield f class ZipFileHolder(PackedBackupFileHandler.FileHolder): def __init__(self, zipf: zipfile.ZipFile): self.zipf = zipf def get_member(self, path: str) -> Optional['ZipBackupHandler.ZipMember']: try: member = self.zipf.getinfo(path) except KeyError: return None else: return ZipBackupHandler.ZipMember(self.zipf, member) def list_member(self) -> List['ZipBackupHandler.ZipMember']: return [ZipBackupHandler.ZipMember(self.zipf, member) for member in self.zipf.infolist()] @contextlib.contextmanager def open_file(self, path: Path) -> ContextManager[ZipFileHolder]: with zipfile.ZipFile(path, 'r') as f: yield self.ZipFileHolder(f) class ImportBackupAction(CreateBackupActionBase):
def __init__(self, file_path: Path, backup_format: Optional[StandaloneBackupFormat] = None, *, ensure_meta: bool = True):
11
2023-11-28 19:03:36+00:00
16k
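The ZipMember class in the record above recovers POSIX file modes from ZipInfo.external_attr (the high 16 bits), falling back to 0o755 for directories and 0o644 for files when an archive stores no Unix attributes. A minimal self-contained demonstration of that round trip using only the standard library:

import io
import stat
import zipfile

buf = io.BytesIO()
with zipfile.ZipFile(buf, "w") as zf:
    info = zipfile.ZipInfo("hello.txt")
    info.external_attr = (stat.S_IFREG | 0o644) << 16  # store a Unix mode
    zf.writestr(info, b"hi")

with zipfile.ZipFile(buf, "r") as zf:
    member = zf.getinfo("hello.txt")
    mode = (member.external_attr >> 16) & 0xFFFF       # same extraction as the record
    print(oct(mode), stat.S_ISREG(mode))               # 0o100644 True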
metatube-community/metatube-plex-plugins
MetaTube.bundle/Contents/Libraries/Shared/urllib3/poolmanager.py
[ { "identifier": "HTTPHeaderDict", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/_collections.py", "snippet": "class HTTPHeaderDict(MutableMapping):\n \"\"\"\n :param headers:\n An iterable of field-value pairs. Must not contain multiple field names\n when compared case-i...
import collections import functools import logging from ._collections import HTTPHeaderDict, RecentlyUsedContainer from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, port_by_scheme from .exceptions import ( LocationValueError, MaxRetryError, ProxySchemeUnknown, ProxySchemeUnsupported, URLSchemeUnknown, ) from .packages import six from .packages.six.moves.urllib.parse import urljoin from .request import RequestMethods from .util.proxy import connection_requires_http_tunnel from .util.retry import Retry from .util.url import parse_url
13,339
from __future__ import absolute_import __all__ = ["PoolManager", "ProxyManager", "proxy_from_url"] log = logging.getLogger(__name__) SSL_KEYWORDS = ( "key_file", "cert_file", "cert_reqs", "ca_certs", "ssl_version", "ca_cert_dir", "ssl_context", "key_password", "server_hostname", ) # All known keyword arguments that could be provided to the pool manager, its # pools, or the underlying connections. This is used to construct a pool key. _key_fields = ( "key_scheme", # str "key_host", # str "key_port", # int "key_timeout", # int or float or Timeout "key_retries", # int or Retry "key_strict", # bool "key_block", # bool "key_source_address", # str "key_key_file", # str "key_key_password", # str "key_cert_file", # str "key_cert_reqs", # str "key_ca_certs", # str "key_ssl_version", # str "key_ca_cert_dir", # str "key_ssl_context", # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext "key_maxsize", # int "key_headers", # dict "key__proxy", # parsed proxy url "key__proxy_headers", # dict "key__proxy_config", # class "key_socket_options", # list of (level (int), optname (int), value (int or str)) tuples "key__socks_options", # dict "key_assert_hostname", # bool or string "key_assert_fingerprint", # str "key_server_hostname", # str ) #: The namedtuple class used to construct keys for the connection pool. #: All custom key schemes should include the fields in this key at a minimum. PoolKey = collections.namedtuple("PoolKey", _key_fields) _proxy_config_fields = ("ssl_context", "use_forwarding_for_https") ProxyConfig = collections.namedtuple("ProxyConfig", _proxy_config_fields) def _default_key_normalizer(key_class, request_context): """ Create a pool key out of a request context dictionary. According to RFC 3986, both the scheme and host are case-insensitive. Therefore, this function normalizes both before constructing the pool key for an HTTPS request. If you wish to change this behaviour, provide alternate callables to ``key_fn_by_scheme``. :param key_class: The class to use when constructing the key. This should be a namedtuple with the ``scheme`` and ``host`` keys at a minimum. :type key_class: namedtuple :param request_context: A dictionary-like object that contain the context for a request. :type request_context: dict :return: A namedtuple that can be used as a connection pool key. :rtype: PoolKey """ # Since we mutate the dictionary, make a copy first context = request_context.copy() context["scheme"] = context["scheme"].lower() context["host"] = context["host"].lower() # These are both dictionaries and need to be transformed into frozensets for key in ("headers", "_proxy_headers", "_socks_options"): if key in context and context[key] is not None: context[key] = frozenset(context[key].items()) # The socket_options key may be a list and needs to be transformed into a # tuple. socket_opts = context.get("socket_options") if socket_opts is not None: context["socket_options"] = tuple(socket_opts) # Map the kwargs to the names in the namedtuple - this is necessary since # namedtuples can't have fields starting with '_'. for key in list(context.keys()): context["key_" + key] = context.pop(key) # Default to ``None`` for keys missing from the context for field in key_class._fields: if field not in context: context[field] = None return key_class(**context) #: A dictionary that maps a scheme to a callable that creates a pool key. #: This can be used to alter the way pool keys are constructed, if desired. 
#: Each PoolManager makes a copy of this dictionary so they can be configured #: globally here, or individually on the instance. key_fn_by_scheme = { "http": functools.partial(_default_key_normalizer, PoolKey), "https": functools.partial(_default_key_normalizer, PoolKey), }
from __future__ import absolute_import __all__ = ["PoolManager", "ProxyManager", "proxy_from_url"] log = logging.getLogger(__name__) SSL_KEYWORDS = ( "key_file", "cert_file", "cert_reqs", "ca_certs", "ssl_version", "ca_cert_dir", "ssl_context", "key_password", "server_hostname", ) # All known keyword arguments that could be provided to the pool manager, its # pools, or the underlying connections. This is used to construct a pool key. _key_fields = ( "key_scheme", # str "key_host", # str "key_port", # int "key_timeout", # int or float or Timeout "key_retries", # int or Retry "key_strict", # bool "key_block", # bool "key_source_address", # str "key_key_file", # str "key_key_password", # str "key_cert_file", # str "key_cert_reqs", # str "key_ca_certs", # str "key_ssl_version", # str "key_ca_cert_dir", # str "key_ssl_context", # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext "key_maxsize", # int "key_headers", # dict "key__proxy", # parsed proxy url "key__proxy_headers", # dict "key__proxy_config", # class "key_socket_options", # list of (level (int), optname (int), value (int or str)) tuples "key__socks_options", # dict "key_assert_hostname", # bool or string "key_assert_fingerprint", # str "key_server_hostname", # str ) #: The namedtuple class used to construct keys for the connection pool. #: All custom key schemes should include the fields in this key at a minimum. PoolKey = collections.namedtuple("PoolKey", _key_fields) _proxy_config_fields = ("ssl_context", "use_forwarding_for_https") ProxyConfig = collections.namedtuple("ProxyConfig", _proxy_config_fields) def _default_key_normalizer(key_class, request_context): """ Create a pool key out of a request context dictionary. According to RFC 3986, both the scheme and host are case-insensitive. Therefore, this function normalizes both before constructing the pool key for an HTTPS request. If you wish to change this behaviour, provide alternate callables to ``key_fn_by_scheme``. :param key_class: The class to use when constructing the key. This should be a namedtuple with the ``scheme`` and ``host`` keys at a minimum. :type key_class: namedtuple :param request_context: A dictionary-like object that contain the context for a request. :type request_context: dict :return: A namedtuple that can be used as a connection pool key. :rtype: PoolKey """ # Since we mutate the dictionary, make a copy first context = request_context.copy() context["scheme"] = context["scheme"].lower() context["host"] = context["host"].lower() # These are both dictionaries and need to be transformed into frozensets for key in ("headers", "_proxy_headers", "_socks_options"): if key in context and context[key] is not None: context[key] = frozenset(context[key].items()) # The socket_options key may be a list and needs to be transformed into a # tuple. socket_opts = context.get("socket_options") if socket_opts is not None: context["socket_options"] = tuple(socket_opts) # Map the kwargs to the names in the namedtuple - this is necessary since # namedtuples can't have fields starting with '_'. for key in list(context.keys()): context["key_" + key] = context.pop(key) # Default to ``None`` for keys missing from the context for field in key_class._fields: if field not in context: context[field] = None return key_class(**context) #: A dictionary that maps a scheme to a callable that creates a pool key. #: This can be used to alter the way pool keys are constructed, if desired. 
#: Each PoolManager makes a copy of this dictionary so they can be configured #: globally here, or individually on the instance. key_fn_by_scheme = { "http": functools.partial(_default_key_normalizer, PoolKey), "https": functools.partial(_default_key_normalizer, PoolKey), }
pool_classes_by_scheme = {"http": HTTPConnectionPool, "https": HTTPSConnectionPool}
2
2023-11-27 07:01:39+00:00
16k
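Because _default_key_normalizer in the record above lower-cases the scheme and host and freezes dict-valued options into frozensets, two request contexts that differ only in letter case or header ordering collapse to the same pool key. A small sketch of that idea with a reduced namedtuple (hypothetical fields, not the full _key_fields tuple):

import collections

MiniPoolKey = collections.namedtuple(
    "MiniPoolKey", ["key_scheme", "key_host", "key_port", "key_headers"])

def normalize(ctx):
    ctx = ctx.copy()
    ctx["scheme"] = ctx["scheme"].lower()
    ctx["host"] = ctx["host"].lower()
    if ctx.get("headers") is not None:
        ctx["headers"] = frozenset(ctx["headers"].items())  # hashable, order-free
    return MiniPoolKey(**{"key_" + k: v for k, v in ctx.items()})

a = normalize({"scheme": "HTTPS", "host": "Example.COM", "port": 443,
               "headers": {"x": "1", "y": "2"}})
b = normalize({"scheme": "https", "host": "example.com", "port": 443,
               "headers": {"y": "2", "x": "1"}})
print(a == b)  # True -- both contexts reuse the same connection pool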
IanYeung/MGLD-VSR
ldm/models/diffusion/ddpm_inv.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n ...
import torch import torch.nn as nn import os import numpy as np import pytorch_lightning as pl from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.distributed import rank_zero_only from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ldm.modules.ema import LitEma from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like from ldm.models.diffusion.ddim import DDIMSampler
13,442
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., embedding_reg_weight=0., unfreeze_model=False, model_lr=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., ): super().__init__() assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema:
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., embedding_reg_weight=0., unfreeze_model=False, model_lr=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., ): super().__init__() assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema:
self.model_ema = LitEma(self.model)
8
2023-11-30 01:50:29+00:00
16k
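The v_posterior argument in the DDPM constructor above is documented as choosing the reverse-process variance sigma^2 = (1 - v) * beta_tilde + v * beta, where beta_tilde is the standard DDPM posterior variance. A worked sketch of that interpolation on a linear beta schedule; this is a generic DDPM computation (the sqrt-space linspace mirrors the usual LDM "linear" schedule), not the repo's own register_schedule code:

import numpy as np

timesteps, linear_start, linear_end, v = 1000, 1e-4, 2e-2, 0.2
# LDM-style "linear" schedule: linspace on sqrt(beta), then squared
betas = np.linspace(linear_start ** 0.5, linear_end ** 0.5, timesteps) ** 2
alphas_cumprod = np.cumprod(1.0 - betas)
alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1])

# standard DDPM posterior variance beta_tilde_t
beta_tilde = betas * (1.0 - alphas_cumprod_prev) / (1.0 - alphas_cumprod)
posterior_variance = (1.0 - v) * beta_tilde + v * betas
print(posterior_variance[:3])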
Institute4FutureHealth/CHA
tasks/types.py
[ { "identifier": "ActivityAnalysis", "path": "tasks/affect/activity_analysis.py", "snippet": "class ActivityAnalysis(Affect):\n \"\"\"\n **Description:**\n\n This tasks performs average, sum, or trend analysis on the provided raw activity affect data for specific patient.\n \"\"\"\n\n ...
from typing import Dict from typing import Type from tasks.affect import ActivityAnalysis from tasks.affect import ActivityGet from tasks.affect import SleepAnalysis from tasks.affect import SleepGet from tasks.ask_user import AskUser from tasks.google_translator import GoogleTranslate from tasks.playwright import Click from tasks.playwright import CurrentWebPage from tasks.playwright import ExtractHyperlinks from tasks.playwright import ExtractText from tasks.playwright import GetElements from tasks.playwright import Navigate from tasks.playwright import NavigateBack from tasks.read_from_datapipe import ReadDataPipe from tasks.serpapi import SerpAPI from tasks.task import BaseTask from tasks.task_types import TaskType from tasks.test_file import TestFile
14,037
TASK_TO_CLASS: Dict[TaskType, Type[BaseTask]] = { TaskType.SERPAPI: SerpAPI, TaskType.CLICK: Click, TaskType.GET_CURRENT_PAGE: CurrentWebPage, TaskType.EXTRACT_HYPERLINKS: ExtractHyperlinks, TaskType.EXTRACT_TEXT: ExtractText, TaskType.GET_ELEMENTS: GetElements, TaskType.NAVIGATE_BACK: NavigateBack, TaskType.NAVIGATE: Navigate, TaskType.AFFECT_SLEEP_GET: SleepGet, TaskType.AFFECT_ACTIVITY_GET: ActivityGet, TaskType.AFFECT_SLEEP_ANALYSIS: SleepAnalysis, TaskType.AFFECT_ACTIVITY_ANALYSIS: ActivityAnalysis, TaskType.GOOGLE_TRANSLATE: GoogleTranslate, TaskType.ASK_USER: AskUser,
TaskType.TEST_FILE: TestFile,
17
2023-12-02 05:10:44+00:00
16k
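The TASK_TO_CLASS mapping above is a plain registry from TaskType members to task classes; the completed next_line adds the TEST_FILE entry. A hedged usage sketch follows, assuming TASK_TO_CLASS, TaskType, and BaseTask are in scope as in the record's imports; the factory name initialize_task is an assumption, not necessarily the repo's actual API.

def initialize_task(task_type: TaskType, **kwargs) -> BaseTask:
    # Look up the class registered for this task type and instantiate it.
    task_cls = TASK_TO_CLASS.get(task_type)
    if task_cls is None:
        raise ValueError(f"Unknown task type: {task_type}")
    return task_cls(**kwargs)


# Example: task = initialize_task(TaskType.SERPAPI)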
Czm369/MixPL
mmdet/models/dense_heads/atss_vlfusion_head.py
[ { "identifier": "MODELS", "path": "mmdet/registry.py", "snippet": "MODELS = Registry('model', parent=MMENGINE_MODELS, locations=['mmdet.models'])" }, { "identifier": "cat_boxes", "path": "mmdet/structures/bbox/transforms.py", "snippet": "def cat_boxes(data_list: List[Union[Tensor, BaseBo...
import copy import math import torch import torch.nn as nn import torch.nn.functional as F from typing import Callable, List, Optional, Sequence, Tuple, Union from mmcv.cnn import Scale from mmcv.ops.modulated_deform_conv import ModulatedDeformConv2d from mmengine.config import ConfigDict from mmengine.model import BaseModel from mmengine.structures import InstanceData from torch import Tensor from transformers import BertConfig from mmdet.registry import MODELS from mmdet.structures.bbox import cat_boxes from mmdet.utils import InstanceList, OptInstanceList, reduce_mean from ..utils import (BertEncoderLayer, VLFuse, filter_scores_and_topk, permute_and_flatten, select_single_mlvl, unpack_gt_instances) from ..utils.vlfuse_helper import MAX_CLAMP_VALUE from .atss_head import ATSSHead
11,366
# Copyright (c) OpenMMLab. All rights reserved. try: except ImportError: BertConfig = None def convert_grounding_to_cls_scores(logits: Tensor, positive_maps: List[dict]) -> Tensor: """Convert logits to class scores.""" assert len(positive_maps) == logits.shape[0] # batch size scores = torch.zeros(logits.shape[0], logits.shape[1], len(positive_maps[0])).to(logits.device) if positive_maps is not None: if all(x == positive_maps[0] for x in positive_maps): # only need to compute once positive_map = positive_maps[0] for label_j in positive_map: scores[:, :, label_j - 1] = logits[:, :, torch.LongTensor(positive_map[label_j] )].mean(-1) else: for i, positive_map in enumerate(positive_maps): for label_j in positive_map: scores[i, :, label_j - 1] = logits[ i, :, torch.LongTensor(positive_map[label_j])].mean(-1) return scores class Conv3x3Norm(nn.Module): """Conv3x3 and norm.""" def __init__(self, in_channels: int, out_channels: int, stride: int, groups: int = 1, use_dcn: bool = False, norm_type: Optional[Union[Sequence, str]] = None): super().__init__() if use_dcn: self.conv = ModulatedDeformConv2d( in_channels, out_channels, kernel_size=3, stride=stride, padding=1, groups=groups) else: self.conv = nn.Conv2d( in_channels, out_channels, kernel_size=3, stride=stride, padding=1, groups=groups) if isinstance(norm_type, Sequence): assert len(norm_type) == 2 assert norm_type[0] == 'gn' gn_group = norm_type[1] norm_type = norm_type[0] if norm_type == 'bn': bn_op = nn.BatchNorm2d(out_channels) elif norm_type == 'gn': bn_op = nn.GroupNorm( num_groups=gn_group, num_channels=out_channels) if norm_type is not None: self.bn = bn_op else: self.bn = None def forward(self, x, **kwargs): x = self.conv(x, **kwargs) if self.bn: x = self.bn(x) return x class DyReLU(nn.Module): """Dynamic ReLU.""" def __init__(self, in_channels: int, out_channels: int, expand_ratio: int = 4): super().__init__() self.avg_pool = nn.AdaptiveAvgPool2d(1) self.expand_ratio = expand_ratio self.out_channels = out_channels self.fc = nn.Sequential( nn.Linear(in_channels, in_channels // expand_ratio), nn.ReLU(inplace=True), nn.Linear(in_channels // expand_ratio, out_channels * self.expand_ratio), nn.Hardsigmoid(inplace=True)) def forward(self, x) -> Tensor: x_out = x b, c, h, w = x.size() x = self.avg_pool(x).view(b, c) x = self.fc(x).view(b, -1, 1, 1) a1, b1, a2, b2 = torch.split(x, self.out_channels, dim=1) a1 = (a1 - 0.5) * 2 + 1.0 a2 = (a2 - 0.5) * 2 b1 = b1 - 0.5 b2 = b2 - 0.5 out = torch.max(x_out * a1 + b1, x_out * a2 + b2) return out class DyConv(nn.Module): """Dynamic Convolution.""" def __init__(self, conv_func: Callable, in_channels: int, out_channels: int, use_dyfuse: bool = True, use_dyrelu: bool = False, use_dcn: bool = False): super().__init__() self.dyconvs = nn.ModuleList() self.dyconvs.append(conv_func(in_channels, out_channels, 1)) self.dyconvs.append(conv_func(in_channels, out_channels, 1)) self.dyconvs.append(conv_func(in_channels, out_channels, 2)) if use_dyfuse: self.attnconv = nn.Sequential( nn.AdaptiveAvgPool2d(1), nn.Conv2d(in_channels, 1, kernel_size=1), nn.ReLU(inplace=True)) self.h_sigmoid = nn.Hardsigmoid(inplace=True) else: self.attnconv = None if use_dyrelu: self.relu = DyReLU(in_channels, out_channels) else: self.relu = nn.ReLU() if use_dcn: self.offset = nn.Conv2d( in_channels, 27, kernel_size=3, stride=1, padding=1) else: self.offset = None self.init_weights() def init_weights(self): for m in self.dyconvs.modules(): if isinstance(m, nn.Conv2d): nn.init.normal_(m.weight.data, 0, 0.01) if m.bias is not None: 
m.bias.data.zero_() if self.attnconv is not None: for m in self.attnconv.modules(): if isinstance(m, nn.Conv2d): nn.init.normal_(m.weight.data, 0, 0.01) if m.bias is not None: m.bias.data.zero_() def forward(self, inputs: dict) -> dict: visual_feats = inputs['visual'] out_vis_feats = [] for level, feature in enumerate(visual_feats): offset_conv_args = {} if self.offset is not None: offset_mask = self.offset(feature) offset = offset_mask[:, :18, :, :] mask = offset_mask[:, 18:, :, :].sigmoid() offset_conv_args = dict(offset=offset, mask=mask) temp_feats = [self.dyconvs[1](feature, **offset_conv_args)] if level > 0: temp_feats.append(self.dyconvs[2](visual_feats[level - 1], **offset_conv_args)) if level < len(visual_feats) - 1: temp_feats.append( F.upsample_bilinear( self.dyconvs[0](visual_feats[level + 1], **offset_conv_args), size=[feature.size(2), feature.size(3)])) mean_feats = torch.mean( torch.stack(temp_feats), dim=0, keepdim=False) if self.attnconv is not None: attn_feat = [] res_feat = [] for feat in temp_feats: res_feat.append(feat) attn_feat.append(self.attnconv(feat)) res_feat = torch.stack(res_feat) spa_pyr_attn = self.h_sigmoid(torch.stack(attn_feat)) mean_feats = torch.mean( res_feat * spa_pyr_attn, dim=0, keepdim=False) out_vis_feats.append(mean_feats) out_vis_feats = [self.relu(item) for item in out_vis_feats] features_dict = {'visual': out_vis_feats, 'lang': inputs['lang']} return features_dict class VLFusionModule(BaseModel): """Visual-lang Fusion Module.""" def __init__(self, in_channels: int, feat_channels: int, num_base_priors: int, early_fuse: bool = False, num_dyhead_blocks: int = 6, lang_model_name: str = 'bert-base-uncased', use_dyrelu: bool = True, use_dyfuse: bool = True, use_dcn: bool = True, use_checkpoint: bool = False, **kwargs) -> None: super().__init__(**kwargs) if BertConfig is None: raise RuntimeError( 'transformers is not installed, please install it by: ' 'pip install transformers.') self.in_channels = in_channels self.feat_channels = feat_channels self.num_base_priors = num_base_priors self.early_fuse = early_fuse self.num_dyhead_blocks = num_dyhead_blocks self.use_dyrelu = use_dyrelu self.use_dyfuse = use_dyfuse self.use_dcn = use_dcn self.use_checkpoint = use_checkpoint self.lang_cfg = BertConfig.from_pretrained(lang_model_name) self.lang_dim = self.lang_cfg.hidden_size self._init_layers() def _init_layers(self) -> None: """Initialize layers of the model.""" bias_value = -math.log((1 - 0.01) / 0.01) dyhead_tower = [] for i in range(self.num_dyhead_blocks): if self.early_fuse: # cross-modality fusion dyhead_tower.append(VLFuse(use_checkpoint=self.use_checkpoint)) # lang branch dyhead_tower.append( BertEncoderLayer( self.lang_cfg, clamp_min_for_underflow=True, clamp_max_for_overflow=True)) # vision branch dyhead_tower.append( DyConv( lambda i, o, s: Conv3x3Norm( i, o, s, use_dcn=self.use_dcn, norm_type=['gn', 16]), self.in_channels if i == 0 else self.feat_channels, self.feat_channels, use_dyrelu=(self.use_dyrelu and self.in_channels == self.feat_channels) if i == 0 else self.use_dyrelu, use_dyfuse=(self.use_dyfuse and self.in_channels == self.feat_channels) if i == 0 else self.use_dyfuse, use_dcn=(self.use_dcn and self.in_channels == self.feat_channels) if i == 0 else self.use_dcn, )) self.add_module('dyhead_tower', nn.Sequential(*dyhead_tower)) self.bbox_pred = nn.Conv2d( self.feat_channels, self.num_base_priors * 4, kernel_size=1) self.centerness = nn.Conv2d( self.feat_channels, self.num_base_priors * 1, kernel_size=1) 
self.dot_product_projection_text = nn.Linear( self.lang_dim, self.num_base_priors * self.feat_channels, bias=True) self.log_scale = nn.Parameter(torch.Tensor([0.0]), requires_grad=True) self.bias_lang = nn.Parameter( torch.zeros(self.lang_dim), requires_grad=True) self.bias0 = nn.Parameter( torch.Tensor([bias_value]), requires_grad=True) self.scales = nn.ModuleList([Scale(1.0) for _ in range(5)]) def forward(self, visual_feats: Tuple[Tensor], language_feats: dict) -> Tuple: feat_inputs = {'visual': visual_feats, 'lang': language_feats} dyhead_tower = self.dyhead_tower(feat_inputs) if self.early_fuse: embedding = dyhead_tower['lang']['hidden'] else: embedding = language_feats['embedded'] embedding = F.normalize(embedding, p=2, dim=-1) dot_product_proj_tokens = self.dot_product_projection_text(embedding / 2.0) dot_product_proj_tokens_bias = torch.matmul( embedding, self.bias_lang) + self.bias0 bbox_preds = [] centerness = [] cls_logits = [] for i, feature in enumerate(visual_feats): visual = dyhead_tower['visual'][i] B, C, H, W = visual.shape bbox_pred = self.scales[i](self.bbox_pred(visual)) bbox_preds.append(bbox_pred) centerness.append(self.centerness(visual)) dot_product_proj_queries = permute_and_flatten( visual, B, self.num_base_priors, C, H, W) bias = dot_product_proj_tokens_bias.unsqueeze(1).repeat( 1, self.num_base_priors, 1) dot_product_logit = ( torch.matmul(dot_product_proj_queries, dot_product_proj_tokens.transpose(-1, -2)) / self.log_scale.exp()) + bias dot_product_logit = torch.clamp( dot_product_logit, max=MAX_CLAMP_VALUE) dot_product_logit = torch.clamp( dot_product_logit, min=-MAX_CLAMP_VALUE) cls_logits.append(dot_product_logit) return bbox_preds, centerness, cls_logits @MODELS.register_module() class ATSSVLFusionHead(ATSSHead): """ATSS head with visual-language fusion module. Args: early_fuse (bool): Whether to fuse visual and language features Defaults to False. use_checkpoint (bool): Whether to use checkpoint. Defaults to False. num_dyhead_blocks (int): Number of dynamic head blocks. Defaults to 6. lang_model_name (str): Name of the language model. Defaults to 'bert-base-uncased'. 
""" def __init__(self, *args, early_fuse: bool = False, use_checkpoint: bool = False, num_dyhead_blocks: int = 6, lang_model_name: str = 'bert-base-uncased', init_cfg=None, **kwargs): super().__init__(*args, **kwargs, init_cfg=init_cfg) self.head = VLFusionModule( in_channels=self.in_channels, feat_channels=self.feat_channels, num_base_priors=self.num_base_priors, early_fuse=early_fuse, use_checkpoint=use_checkpoint, num_dyhead_blocks=num_dyhead_blocks, lang_model_name=lang_model_name) self.text_masks = None def _init_layers(self) -> None: """No need to initialize the ATSS head layer.""" pass def forward(self, visual_feats: Tuple[Tensor], language_feats: dict) -> Tuple[Tensor]: """Forward function.""" bbox_preds, centerness, cls_logits = self.head(visual_feats, language_feats) return cls_logits, bbox_preds, centerness def loss(self, visual_feats: Tuple[Tensor], language_feats: dict, batch_data_samples): outputs = unpack_gt_instances(batch_data_samples) (batch_gt_instances, batch_gt_instances_ignore, batch_img_metas) = outputs outs = self(visual_feats, language_feats) self.text_masks = language_feats['masks'] loss_inputs = outs + (batch_gt_instances, batch_img_metas, batch_gt_instances_ignore) losses = self.loss_by_feat(*loss_inputs) return losses def loss_by_feat( self, cls_scores: List[Tensor], bbox_preds: List[Tensor], centernesses: List[Tensor], batch_gt_instances: InstanceList, batch_img_metas: List[dict], batch_gt_instances_ignore: OptInstanceList = None) -> dict: """Calculate the loss based on the features extracted by the detection head. Args: cls_scores (list[Tensor]): Box scores for each scale level Has shape (N, num_anchors * num_classes, H, W) bbox_preds (list[Tensor]): Box energies / deltas for each scale level with shape (N, num_anchors * 4, H, W) centernesses (list[Tensor]): Centerness for each scale level with shape (N, num_anchors * 1, H, W) batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. It usually includes ``bboxes`` and ``labels`` attributes. batch_img_metas (list[dict]): Meta information of each image, e.g., image size, scaling factor, etc. batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional): Batch of gt_instances_ignore. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. Returns: dict[str, Tensor]: A dictionary of loss components. 
""" featmap_sizes = [featmap.size()[-2:] for featmap in bbox_preds] assert len(featmap_sizes) == self.prior_generator.num_levels device = cls_scores[0].device anchor_list, valid_flag_list = self.get_anchors( featmap_sizes, batch_img_metas, device=device) cls_reg_targets = self.get_targets( anchor_list, valid_flag_list, batch_gt_instances, batch_img_metas, batch_gt_instances_ignore=batch_gt_instances_ignore) (anchor_list, labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, avg_factor) = cls_reg_targets avg_factor = reduce_mean( torch.tensor(avg_factor, dtype=torch.float, device=device)).item() anchors = torch.cat(anchor_list, dim=1) labels = torch.cat(labels_list, dim=1) label_weights = torch.cat(label_weights_list, dim=1) bbox_targets = torch.cat(bbox_targets_list, dim=1) cls_scores = torch.cat(cls_scores, dim=1) centernesses_ = [] bbox_preds_ = [] for bbox_pred, centerness in zip(bbox_preds, centernesses): centernesses_.append( centerness.permute(0, 2, 3, 1).reshape(cls_scores.size(0), -1, 1)) bbox_preds_.append( bbox_pred.permute(0, 2, 3, 1).reshape(cls_scores.size(0), -1, 4)) bbox_preds = torch.cat(bbox_preds_, dim=1) centernesses = torch.cat(centernesses_, dim=1) losses_cls, losses_bbox, loss_centerness, bbox_avg_factor = \ self._loss_by_feat( anchors, cls_scores, bbox_preds, centernesses, labels, label_weights, bbox_targets, avg_factor=avg_factor) bbox_avg_factor = reduce_mean(bbox_avg_factor).clamp_(min=1).item() losses_bbox = losses_bbox / bbox_avg_factor return dict( loss_cls=losses_cls, loss_bbox=losses_bbox, loss_centerness=loss_centerness) def _loss_by_feat(self, anchors: Tensor, cls_score: Tensor, bbox_pred: Tensor, centerness: Tensor, labels: Tensor, label_weights: Tensor, bbox_targets: Tensor, avg_factor: float) -> dict: """Calculate the loss of all scale level based on the features extracted by the detection head. Returns: dict[str, Tensor]: A dictionary of loss components. """ anchors = anchors.reshape(-1, 4) # ===== this change ===== pos_inds = (labels.sum(-1) > 0).reshape(-1) # Loss is not computed for the padded regions of the text. assert (self.text_masks.dim() == 2) text_mask = (self.text_masks > 0).unsqueeze(1) text_mask = text_mask.repeat(1, cls_score.size(1), 1) cls_score = torch.masked_select(cls_score, text_mask).contiguous() labels = torch.masked_select(labels, text_mask) label_weights = label_weights[..., None].repeat(1, 1, text_mask.size(-1)) label_weights = torch.masked_select(label_weights, text_mask) bbox_pred = bbox_pred.reshape(-1, 4) centerness = centerness.reshape(-1) bbox_targets = bbox_targets.reshape(-1, 4) labels = labels.reshape(-1) label_weights = label_weights.reshape(-1) # classification loss loss_cls = self.loss_cls( cls_score, labels, label_weights, avg_factor=avg_factor) if pos_inds.sum() > 0: pos_bbox_targets = bbox_targets[pos_inds] pos_bbox_pred = bbox_pred[pos_inds] pos_anchors = anchors[pos_inds] pos_centerness = centerness[pos_inds] centerness_targets = self.centerness_target( pos_anchors, pos_bbox_targets) if torch.isnan(centerness_targets).any(): print('=====Centerness includes NaN=====') mask = ~torch.isnan(centerness_targets) centerness_targets = centerness_targets[mask] pos_centerness = pos_centerness[mask] pos_anchors = pos_anchors[mask] pos_bbox_targets = pos_bbox_targets[mask] pos_bbox_pred = pos_bbox_pred[mask] if pos_bbox_targets.shape[0] == 0: loss_bbox = bbox_pred.sum() * 0 loss_centerness = centerness.sum() * 0 centerness_targets = bbox_targets.new_tensor(0.) 
return loss_cls, loss_bbox, loss_centerness, \ centerness_targets.sum() # The decoding process takes the offset into consideration. pos_anchors[:, 2:] += 1 pos_decode_bbox_pred = self.bbox_coder.decode( pos_anchors, pos_bbox_pred) # regression loss loss_bbox = self.loss_bbox( pos_decode_bbox_pred, pos_bbox_targets, weight=centerness_targets, avg_factor=1.0) # centerness loss loss_centerness = self.loss_centerness( pos_centerness, centerness_targets, avg_factor=avg_factor) else: loss_bbox = bbox_pred.sum() * 0 loss_centerness = centerness.sum() * 0 centerness_targets = bbox_targets.new_tensor(0.) return loss_cls, loss_bbox, loss_centerness, centerness_targets.sum() def _get_targets_single(self, flat_anchors: Tensor, valid_flags: Tensor, num_level_anchors: List[int], gt_instances: InstanceData, img_meta: dict, gt_instances_ignore: Optional[InstanceData] = None, unmap_outputs: bool = True) -> tuple: """Compute regression, classification targets for anchors in a single image. Args: flat_anchors (Tensor): Multi-level anchors of the image, which are concatenated into a single tensor of shape (num_anchors ,4) valid_flags (Tensor): Multi level valid flags of the image, which are concatenated into a single tensor of shape (num_anchors,). num_level_anchors (List[int]): Number of anchors of each scale level. gt_instances (:obj:`InstanceData`): Ground truth of instance annotations. It usually includes ``bboxes`` and ``labels`` attributes. img_meta (dict): Meta information for current image. gt_instances_ignore (:obj:`InstanceData`, optional): Instances to be ignored during training. It includes ``bboxes`` attribute data that is ignored during training and testing. Defaults to None. unmap_outputs (bool): Whether to map outputs back to the original set of anchors. Returns: tuple: N is the number of total anchors in the image. labels (Tensor): Labels of all anchors in the image with shape (N,). label_weights (Tensor): Label weights of all anchor in the image with shape (N,). bbox_targets (Tensor): BBox targets of all anchors in the image with shape (N, 4). bbox_weights (Tensor): BBox weights of all anchors in the image with shape (N, 4) pos_inds (Tensor): Indices of positive anchor with shape (num_pos,). neg_inds (Tensor): Indices of negative anchor with shape (num_neg,). sampling_result (:obj:`SamplingResult`): Sampling results. 
""" anchors = flat_anchors # Align the official implementation anchors[:, 2:] -= 1 num_level_anchors_inside = num_level_anchors pred_instances = InstanceData(priors=anchors) assign_result = self.assigner.assign(pred_instances, num_level_anchors_inside, gt_instances, gt_instances_ignore) sampling_result = self.sampler.sample(assign_result, pred_instances, gt_instances) num_valid_anchors = anchors.shape[0] bbox_targets = torch.zeros_like(anchors) bbox_weights = torch.zeros_like(anchors) # ===== this change ===== labels = anchors.new_full((num_valid_anchors, self.feat_channels), 0, dtype=torch.float32) label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float) pos_inds = sampling_result.pos_inds neg_inds = sampling_result.neg_inds if len(pos_inds) > 0: if self.reg_decoded_bbox: pos_bbox_targets = sampling_result.pos_gt_bboxes else: pos_bbox_targets = self.bbox_coder.encode( sampling_result.pos_priors, sampling_result.pos_gt_bboxes) bbox_targets[pos_inds, :] = pos_bbox_targets bbox_weights[pos_inds, :] = 1.0 # ===== this change ===== labels[pos_inds] = gt_instances.positive_maps[ sampling_result.pos_assigned_gt_inds] if self.train_cfg['pos_weight'] <= 0: label_weights[pos_inds] = 1.0 else: label_weights[pos_inds] = self.train_cfg['pos_weight'] if len(neg_inds) > 0: label_weights[neg_inds] = 1.0 return (anchors, labels, label_weights, bbox_targets, bbox_weights, pos_inds, neg_inds, sampling_result) def centerness_target(self, anchors: Tensor, gts: Tensor) -> Tensor: """Calculate the centerness between anchors and gts. Only calculate pos centerness targets, otherwise there may be nan. Args: anchors (Tensor): Anchors with shape (N, 4), "xyxy" format. gts (Tensor): Ground truth bboxes with shape (N, 4), "xyxy" format. Returns: Tensor: Centerness between anchors and gts. """ anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2 anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2 l_ = anchors_cx - gts[:, 0] t_ = anchors_cy - gts[:, 1] r_ = gts[:, 2] - anchors_cx b_ = gts[:, 3] - anchors_cy left_right = torch.stack([l_, r_], dim=1) top_bottom = torch.stack([t_, b_], dim=1) centerness = torch.sqrt( (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])) # assert not torch.isnan(centerness).any() return centerness def predict(self, visual_feats: Tuple[Tensor], language_feats: dict, batch_data_samples, rescale: bool = True): """Perform forward propagation of the detection head and predict detection results on the features of the upstream network. Args: visual_feats (tuple[Tensor]): Multi-level visual features from the upstream network, each is a 4D-tensor. language_feats (dict): Language features from the upstream network. batch_data_samples (List[:obj:`DetDataSample`]): The Data Samples. It usually includes information such as `gt_instance`, `gt_panoptic_seg` and `gt_sem_seg`. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list[obj:`InstanceData`]: Detection results of each image after the post process. 
""" batch_img_metas = [ data_samples.metainfo for data_samples in batch_data_samples ] batch_token_positive_maps = [ data_samples.token_positive_map for data_samples in batch_data_samples ] outs = self(visual_feats, language_feats) predictions = self.predict_by_feat( *outs, batch_img_metas=batch_img_metas, batch_token_positive_maps=batch_token_positive_maps, rescale=rescale) return predictions def predict_by_feat(self, cls_logits: List[Tensor], bbox_preds: List[Tensor], score_factors: List[Tensor], batch_img_metas: Optional[List[dict]] = None, batch_token_positive_maps: Optional[List[dict]] = None, cfg: Optional[ConfigDict] = None, rescale: bool = False, with_nms: bool = True) -> InstanceList: """Transform a batch of output features extracted from the head into bbox results. Note: When score_factors is not None, the cls_scores are usually multiplied by it then obtain the real score used in NMS, such as CenterNess in FCOS, IoU branch in ATSS. Args: cls_logits (list[Tensor]): Classification scores for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * num_classes, H, W). bbox_preds (list[Tensor]): Box energies / deltas for all scale levels, each is a 4D-tensor, has shape (batch_size, num_priors * 4, H, W). score_factors (list[Tensor], optional): Score factor for all scale level, each is a 4D-tensor, has shape (batch_size, num_priors * 1, H, W). Defaults to None. batch_img_metas (list[dict], Optional): Batch image meta info. Defaults to None. batch_token_positive_maps (list[dict], Optional): Batch token positive map. Defaults to None. cfg (ConfigDict, optional): Test / postprocessing configuration, if None, test_cfg would be used. Defaults to None. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: list[:obj:`InstanceData`]: Object detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ assert len(bbox_preds) == len(score_factors) num_levels = len(bbox_preds) featmap_sizes = [bbox_preds[i].shape[-2:] for i in range(num_levels)] mlvl_priors = self.prior_generator.grid_priors( featmap_sizes, dtype=bbox_preds[0].dtype, device=bbox_preds[0].device) result_list = [] for img_id in range(len(batch_img_metas)): img_meta = batch_img_metas[img_id] token_positive_maps = batch_token_positive_maps[img_id] bbox_pred_list = select_single_mlvl( bbox_preds, img_id, detach=True) score_factor_list = select_single_mlvl( score_factors, img_id, detach=True) cls_logit_list = select_single_mlvl( cls_logits, img_id, detach=True) results = self._predict_by_feat_single( bbox_pred_list=bbox_pred_list, score_factor_list=score_factor_list, cls_logit_list=cls_logit_list, mlvl_priors=mlvl_priors, token_positive_maps=token_positive_maps, img_meta=img_meta, cfg=cfg, rescale=rescale, with_nms=with_nms) result_list.append(results) return result_list def _predict_by_feat_single(self, bbox_pred_list: List[Tensor], score_factor_list: List[Tensor], cls_logit_list: List[Tensor], mlvl_priors: List[Tensor], token_positive_maps: dict, img_meta: dict, cfg: ConfigDict, rescale: bool = True, with_nms: bool = True) -> InstanceData: """Transform a single image's features extracted from the head into bbox results. 
Args: bbox_pred_list (list[Tensor]): Box energies / deltas from all scale levels of a single image, each item has shape (num_priors * 4, H, W). score_factor_list (list[Tensor]): Score factor from all scale levels of a single image, each item has shape (num_priors * 1, H, W). cls_logit_list (list[Tensor]): Box scores from all scale levels of a single image, each item has shape (num_priors * num_classes, H, W). mlvl_priors (list[Tensor]): Each element in the list is the priors of a single level in feature pyramid. In all anchor-based methods, it has shape (num_priors, 4). In all anchor-free methods, it has shape (num_priors, 2) when `with_stride=True`, otherwise it still has shape (num_priors, 4). token_positive_maps (dict): Token positive map. img_meta (dict): Image meta info. cfg (mmengine.Config): Test / postprocessing configuration, if None, test_cfg would be used. rescale (bool): If True, return boxes in original image space. Defaults to False. with_nms (bool): If True, do nms before return boxes. Defaults to True. Returns: :obj:`InstanceData`: Detection results of each image after the post process. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ cfg = self.test_cfg if cfg is None else cfg cfg = copy.deepcopy(cfg) img_shape = img_meta['img_shape'] nms_pre = cfg.get('nms_pre', -1) score_thr = cfg.get('score_thr', 0) mlvl_bbox_preds = [] mlvl_valid_priors = [] mlvl_scores = [] mlvl_labels = [] for level_idx, (bbox_pred, score_factor, cls_logit, priors) in \ enumerate(zip(bbox_pred_list, score_factor_list, cls_logit_list, mlvl_priors)): bbox_pred = bbox_pred.permute(1, 2, 0).reshape( -1, self.bbox_coder.encode_size) score_factor = score_factor.permute(1, 2, 0).reshape(-1).sigmoid() scores = convert_grounding_to_cls_scores( logits=cls_logit.sigmoid()[None], positive_maps=[token_positive_maps])[0]
results = filter_scores_and_topk(
4
2023-11-30 08:58:00+00:00
16k
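convert_grounding_to_cls_scores, defined in full in the all_code field above, averages the grounding logits over the token positions assigned to each label in a positive map. A small usage example with made-up shapes, assuming the function is in scope:

import torch

# Fake grounding logits: batch of 1 image, 3 priors, 6 text tokens.
logits = torch.rand(1, 3, 6)
# Label 1 is grounded on tokens 0-1, label 2 on tokens 3-5 (labels are 1-indexed).
positive_maps = [{1: [0, 1], 2: [3, 4, 5]}]

scores = convert_grounding_to_cls_scores(logits, positive_maps)
print(scores.shape)  # torch.Size([1, 3, 2]); column j holds the mean logit for label j+1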
SEU-ProactiveSecurity-Group/MalPurifier
examples/md_nn_test.py
[ { "identifier": "Dataset", "path": "core/defense/dataset.py", "snippet": "class Dataset(torch.utils.data.Dataset):\n def __init__(self, seed=0, device='cuda', feature_ext_args=None):\n \"\"\"\n 为机器学习模型学习构建数据集。\n \n :param seed: 随机种子\n :param device: 设备类型,'cuda' 或 'c...
import os.path as path import argparse import time import numpy from core.defense import Dataset from core.defense import MalwareDetectionDNN from tools.utils import save_args, get_group_args, to_tensor, dump_pickle, read_pickle
11,763
# Use future-version features to ensure consistent behavior across Python 2 and Python 3 from __future__ import absolute_import from __future__ import division from __future__ import print_function # Import required libraries # Import custom modules # Initialize the argparse object for parsing command-line arguments cmd_md = argparse.ArgumentParser(description='arguments for learning malware detector') # Define command-line arguments related to feature extraction feature_argparse = cmd_md.add_argument_group(title='feature') feature_argparse.add_argument('--proc_number', type=int, default=2, help='The number of threads for features extraction.') # Number of threads for feature extraction feature_argparse.add_argument('--number_of_smali_files', type=int, default=1000000, help='The maximum number of smali files to represent each app') # Maximum number of smali files used to represent each app feature_argparse.add_argument('--max_vocab_size', type=int, default=10000, help='The maximum number of vocabulary size') # Maximum vocabulary size feature_argparse.add_argument('--update', action='store_true', help='Whether update the existed features.') # Whether to update existing features # Define command-line arguments related to the detector detector_argparse = cmd_md.add_argument_group(title='detector') detector_argparse.add_argument('--cuda', action='store_true', default=False, help='whether use cuda enable gpu or cpu.') # Whether to use CUDA to enable the GPU detector_argparse.add_argument('--seed', type=int, default=0, help='random seed.') # Random seed detector_argparse.add_argument('--dense_hidden_units', type=lambda s: [int(u) for u in s.split(',')], default='200,200', help='delimited list input, e.g., "200,200"') # List of dense hidden units detector_argparse.add_argument('--dropout', type=float, default=0.6, help='dropout rate') # Dropout rate detector_argparse.add_argument('--alpha_', type=float, default=0.2, help='slope coefficient of leaky-relu or elu') # Slope coefficient of leaky-relu or elu detector_argparse.add_argument('--smooth', action='store_true', default=False, help='use smooth activation elu (rather than leaky-relu) in the GAT layer.') # Use the smooth elu activation in the GAT layer detector_argparse.add_argument('--batch_size', type=int, default=128, help='mini-batch size') # Mini-batch size detector_argparse.add_argument('--epochs', type=int, default=50, help='number of epochs to train.') # Number of training epochs detector_argparse.add_argument('--lr', type=float, default=0.001, help='initial learning rate.') # Initial learning rate detector_argparse.add_argument('--weight_decay', type=float, default=0e-4, help='coefficient of weight decay') # Weight decay coefficient # Define command-line arguments related to the dataset dataset_argparse = cmd_md.add_argument_group(title='data_producer') detector_argparse.add_argument('--cache', action='store_true', default=False, help='use cache data or not.') # Whether to use cached data # Define command-line arguments related to the mode mode_argparse = cmd_md.add_argument_group(title='mode') mode_argparse.add_argument('--mode', type=str, default='train', choices=['train', 'test'], required=False, help='learn a model or test it.') # Mode: train a model or test it mode_argparse.add_argument('--model_name', type=str, default='xxxxxxxx-xxxxxx', required=False, help='suffix date of a tested model name.') # Date suffix of a tested model's name # Define the main function def _main(): args = cmd_md.parse_args() # Create the dataset according to the arguments dataset = Dataset(feature_ext_args=get_group_args(args, cmd_md, 'feature')) # Get the input producer for the training dataset train_dataset_producer = dataset.get_input_producer(*dataset.train_dataset, batch_size=args.batch_size, name='train', use_cache=args.cache) # Get the input producer for the validation dataset val_dataset_producer = dataset.get_input_producer(*dataset.validation_dataset, batch_size=args.batch_size, name='val') # Get the input producer for the test dataset test_dataset_producer = dataset.get_input_producer(*dataset.test_dataset, batch_size=args.batch_size, name='test') # Ensure the dataset has exactly 2 classes assert dataset.n_classes == 2 # Set the device to CPU or CUDA if not args.cuda: dv = 'cpu' else: dv = 'cuda' # Set the model name model_name = args.model_name if 
args.mode == 'test' else time.strftime("%Y%m%d-%H%M%S") # Create the model instance model = MalwareDetectionDNN(dataset.vocab_size, dataset.n_classes, device=dv, name=model_name, **vars(args) ) # Move the model to the specified device and convert it to double precision model = model.to(dv).double() # If in training mode, fit the model if args.mode == 'train': model.fit(train_dataset_producer, val_dataset_producer, epochs=args.epochs, lr=args.lr, weight_decay=args.weight_decay ) # Save the arguments in a human-readable form
save_args(path.join(path.dirname(model.model_save_path), "hparam"), vars(args))
2
2023-11-27 02:00:23+00:00
16k
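get_group_args(args, cmd_md, 'feature') above is imported from tools.utils, and its body is not part of this record. A plausible minimal implementation is sketched below purely as an assumption; it leans on argparse internals (_action_groups, _group_actions) and may well differ from the repo's helper.

import argparse


def get_group_args(args: argparse.Namespace, parser: argparse.ArgumentParser, title: str) -> dict:
    # Hypothetical helper: collect the parsed values belonging to the
    # argument group whose title matches `title`.
    for group in parser._action_groups:
        if group.title == title:
            return {a.dest: getattr(args, a.dest)
                    for a in group._group_actions if hasattr(args, a.dest)}
    return {}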
Vali-98/XTTS-RVC-UI
rvc.py
[ { "identifier": "SynthesizerTrnMs256NSFsid", "path": "infer_pack/models.py", "snippet": "class SynthesizerTrnMs256NSFsid(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_he...
from multiprocessing import cpu_count from pathlib import Path from fairseq import checkpoint_utils from scipy.io import wavfile from infer_pack.models import ( SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono, SynthesizerTrnMs768NSFsid, SynthesizerTrnMs768NSFsid_nono, ) from vc_infer_pipeline import VC import torch import librosa import numpy as np
11,095
class Config: def __init__(self, device, is_half): self.device = device self.is_half = is_half self.n_cpu = 0 self.gpu_name = None self.gpu_mem = None self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config() def device_config(self) -> tuple: if torch.cuda.is_available(): i_device = int(self.device.split(":")[-1]) self.gpu_name = torch.cuda.get_device_name(i_device) if ( ("16" in self.gpu_name and "V100" not in self.gpu_name.upper()) or "P40" in self.gpu_name.upper() or "1060" in self.gpu_name or "1070" in self.gpu_name or "1080" in self.gpu_name ): print("16 series/10 series P40 forced single precision") self.is_half = False else: self.gpu_name = None self.gpu_mem = int( torch.cuda.get_device_properties(i_device).total_memory / 1024 / 1024 / 1024 + 0.4 ) if self.gpu_mem <= 2: print('Not enough VRAM to load models (Probably)') self.device = 'cpu' elif torch.backends.mps.is_available(): print("No supported N-card found, use MPS for inference") self.device = "mps" else: print("No supported N-card found, use CPU for inference") self.device = "cpu" if self.n_cpu == 0: self.n_cpu = cpu_count() if self.is_half: # 6G memory config x_pad = 3 x_query = 10 x_center = 60 x_max = 65 else: # 5G memory config x_pad = 1 x_query = 6 x_center = 38 x_max = 41 if self.gpu_mem != None and self.gpu_mem <= 4: x_pad = 1 x_query = 5 x_center = 30 x_max = 32 return x_pad, x_query, x_center, x_max def load_hubert(device, is_half, model_path): models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task([model_path], suffix='', ) hubert = models[0] hubert = hubert.to(device) if is_half: hubert = hubert.half() else: hubert = hubert.float() hubert.eval() return hubert def get_vc(device, is_half, config, model_path): cpt = torch.load(model_path, map_location='cpu') if "config" not in cpt or "weight" not in cpt: raise ValueError(f'Incorrect format for {model_path}. Use a voice model trained using RVC v2 instead.') tgt_sr = cpt["config"][-1] cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] if_f0 = cpt.get("f0", 1) version = cpt.get("version", "v1") if version == "v1": if if_f0 == 1: net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=is_half) else: net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"]) elif version == "v2": if if_f0 == 1:
net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=is_half)
2
2023-11-30 08:47:28+00:00
16k
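The version/if_f0 branching inside get_vc selects one of four synthesizer classes; the completed next_line covers the v2 f0-enabled case. An equivalent table-driven sketch is shown below, assuming the four classes are imported as in this record; the v2 no-f0 branch is inferred from the import list rather than visible in the crop, so treat it as an assumption.

MODEL_TABLE = {
    ("v1", 1): SynthesizerTrnMs256NSFsid,
    ("v1", 0): SynthesizerTrnMs256NSFsid_nono,
    ("v2", 1): SynthesizerTrnMs768NSFsid,
    ("v2", 0): SynthesizerTrnMs768NSFsid_nono,  # inferred from the imports
}


def build_net_g(cpt: dict, is_half: bool):
    version, if_f0 = cpt.get("version", "v1"), cpt.get("f0", 1)
    net_cls = MODEL_TABLE[(version, if_f0)]
    # In the snippet above, only the f0-enabled variants receive is_half.
    kwargs = {"is_half": is_half} if if_f0 == 1 else {}
    return net_cls(*cpt["config"], **kwargs)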
ubc-vision/nf-soft-mining
examples/utils.py
[ { "identifier": "OccGridEstimator", "path": "nerfacc/estimators/occ_grid.py", "snippet": "class OccGridEstimator(AbstractEstimator):\n \"\"\"Occupancy grid transmittance estimator for spatial skipping.\n\n References: \"Instant Neural Graphics Primitives.\"\n\n Args:\n roi_aabb: The axis...
import random import numpy as np import torch from typing import Optional, Sequence from typing import Literal from typing_extensions import Literal from datasets.utils import Rays, namedtuple_map from torch.utils.data._utils.collate import collate, default_collate_fn_map from nerfacc.estimators.occ_grid import OccGridEstimator from nerfacc.estimators.prop_net import PropNetEstimator from nerfacc.grid import ray_aabb_intersect, traverse_grids from nerfacc.volrend import ( accumulate_along_rays_, render_weight_from_density, rendering, )
11,678
""" Copyright (c) 2022 Ruilong Li, UC Berkeley. """ try: except ImportError: NERF_SYNTHETIC_SCENES = [ "chair", "drums", "ficus", "hotdog", "lego", "materials", "mic", "ship", ] MIPNERF360_UNBOUNDED_SCENES = [ "garden", "bicycle", "bonsai", "counter", "kitchen", "room", "stump", ] LLFF_NDC_SCENES = [ "fern", "flower", "fortress", "horns", "leaves", "orchids", "room_llff", "trex", ] def set_random_seed(seed): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) def render_image_with_occgrid( # scene radiance_field: torch.nn.Module, estimator: OccGridEstimator, rays: Rays, # rendering options near_plane: float = 0.0, far_plane: float = 1e10, render_step_size: float = 1e-3, render_bkgd: Optional[torch.Tensor] = None, cone_angle: float = 0.0, alpha_thre: float = 0.0, # test options test_chunk_size: int = 8192, # only useful for dnerf timestamps: Optional[torch.Tensor] = None, ): """Render the pixels of an image.""" rays_shape = rays.origins.shape if len(rays_shape) == 3: height, width, _ = rays_shape num_rays = height * width rays = namedtuple_map( lambda r: r.reshape([num_rays] + list(r.shape[2:])), rays ) else: num_rays, _ = rays_shape def sigma_fn(t_starts, t_ends, ray_indices): t_origins = chunk_rays.origins[ray_indices] t_dirs = chunk_rays.viewdirs[ray_indices] positions = t_origins + t_dirs * (t_starts + t_ends)[:, None] / 2.0 if timestamps is not None: # dnerf t = ( timestamps[ray_indices] if radiance_field.training else timestamps.expand_as(positions[:, :1]) ) sigmas = radiance_field.query_density(positions, t) else: sigmas = radiance_field.query_density(positions) return sigmas.squeeze(-1) def rgb_sigma_fn(t_starts, t_ends, ray_indices): t_origins = chunk_rays.origins[ray_indices] t_dirs = chunk_rays.viewdirs[ray_indices] positions = t_origins + t_dirs * (t_starts + t_ends)[:, None] / 2.0 if timestamps is not None: # dnerf t = ( timestamps[ray_indices] if radiance_field.training else timestamps.expand_as(positions[:, :1]) ) rgbs, sigmas = radiance_field(positions, t, t_dirs) else: rgbs, sigmas = radiance_field(positions, t_dirs) return rgbs, sigmas.squeeze(-1) results = [] chunk = ( torch.iinfo(torch.int32).max if radiance_field.training else test_chunk_size ) for i in range(0, num_rays, chunk): chunk_rays = namedtuple_map(lambda r: r[i : i + chunk], rays) ray_indices, t_starts, t_ends = estimator.sampling( chunk_rays.origins, chunk_rays.viewdirs, sigma_fn=sigma_fn, near_plane=near_plane, far_plane=far_plane, render_step_size=render_step_size, stratified=radiance_field.training, cone_angle=cone_angle, alpha_thre=alpha_thre, )
rgb, opacity, depth, extras = rendering(
6
2023-11-27 22:12:55+00:00
16k
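render_image_with_occgrid above flattens the H x W ray bundle and, outside of training, walks it in test_chunk_size slices to bound peak memory. The same idiom in isolation, as a hedged self-contained sketch:

import torch


def process_in_chunks(fn, x: torch.Tensor, chunk: int) -> torch.Tensor:
    # Apply `fn` to row-chunks of `x` and concatenate the results,
    # mirroring the test-time loop in render_image_with_occgrid.
    outs = [fn(x[i:i + chunk]) for i in range(0, x.shape[0], chunk)]
    return torch.cat(outs, dim=0)


rays = torch.rand(10_000, 3)
rgb = process_in_chunks(lambda r: r * 0.5, rays, chunk=4096)
assert rgb.shape == rays.shape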
facebookresearch/SOC-matching
main.py
[ { "identifier": "get_folder_name", "path": "SOC_matching/utils.py", "snippet": "def get_folder_name(cfg):\n folder_name = (\n cfg.method.algorithm\n + \"_\"\n + cfg.method.setting\n + \"_\"\n + str(cfg.method.lmbd)\n + \"_\"\n + str(cfg.method.T)\n ...
import torch import sys import logging import os import time import json import hydra import traceback from tqdm.notebook import tqdm from omegaconf import DictConfig from SOC_matching.utils import ( get_folder_name, get_file_name, control_objective, save_results, compute_EMA, normalization_constant, ) from SOC_matching.method import ( SOC_Solver, ) from SOC_matching.experiment_settings.settings import define_variables
11,353
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory. log = logging.getLogger(__name__) @hydra.main(version_base=None, config_path="configs", config_name="soc") def main(cfg: DictConfig): logging.getLogger("lightning.pytorch").setLevel(logging.getLevelName("INFO")) print(cfg) print("Found {} CUDA devices.".format(torch.cuda.device_count())) for i in range(torch.cuda.device_count()): props = torch.cuda.get_device_properties(i) print( "{} \t Memory: {:.2f}GB".format( props.name, props.total_memory / (1024**3) ) ) keys = [ "SLURM_NODELIST", "SLURM_JOB_ID", "SLURM_NTASKS", "SLURM_JOB_NAME", "SLURM_PROCID", "SLURM_LOCALID", "SLURM_NODEID", ] log.info(json.dumps({k: os.environ.get(k, None) for k in keys}, indent=4)) cmd_str = " \\\n".join([f"python {sys.argv[0]}"] + ["\t" + x for x in sys.argv[1:]]) with open("cmd.sh", "w") as fout: print("#!/bin/bash\n", file=fout) print(cmd_str, file=fout) log.info(f"CWD: {os.getcwd()}") if cfg.method.use_gpu: cfg.method.device = "cuda:" + str(cfg.method.device_number) else: cfg.method.device = "cpu" torch.manual_seed(cfg.method.seed) algorithm = cfg.method.algorithm folder_name = ( cfg.method.algorithm + "_" + cfg.method.setting + "_" + str(cfg.method.lmbd) + "_" + str(cfg.method.T) + "_" + str(cfg.method.num_steps) + "_" + str(cfg.method.use_warm_start) + "_" + str(cfg.method.seed) + "_" + str(cfg.optim.batch_size) + "_" + str(cfg.optim.M_lr) + "_" + str(cfg.optim.nabla_V_lr) ) ts = torch.linspace(0, cfg.method.T, cfg.method.num_steps + 1).to(cfg.method.device) folder_name = get_folder_name(cfg)
file_name = get_file_name(folder_name, num_iterations=cfg.method.num_iterations)
1
2023-12-04 20:26:18+00:00
16k
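In main above, folder_name is first built by hand and then immediately overwritten by get_folder_name(cfg), whose snippet in the context field shows the same underscore-joined pattern. A compact equivalent, offered only as an illustrative sketch:

def folder_name_from_cfg(cfg) -> str:
    # Join the run-defining hyperparameters with underscores,
    # matching the concatenation visible in main() above.
    m, o = cfg.method, cfg.optim
    parts = [m.algorithm, m.setting, m.lmbd, m.T, m.num_steps,
             m.use_warm_start, m.seed, o.batch_size, o.M_lr, o.nabla_V_lr]
    return "_".join(str(p) for p in parts)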
yiwenlu66/learning-qp
src/modules/qp_unrolled_network.py
[ { "identifier": "QPSolver", "path": "src/modules/qp_solver.py", "snippet": "class QPSolver(nn.Module):\n \"\"\"\n Solve QP problem:\n minimize (1/2)x'Px + q'x\n subject to Hx + b >= 0,\n where x in R^n, b in R^m.\n \"\"\"\n def __init__(self, device, n, m,\n P=None, P...
import torch import numpy as np import scipy import functools import os from torch import nn from ..modules.qp_solver import QPSolver from ..modules.warm_starter import WarmStarter from ..utils.torch_utils import make_psd, interpolate_state_dicts from ..utils.mpc_utils import mpc2qp, scenario_robust_mpc, tube_robust_mpc from ..utils.osqp_utils import osqp_oracle from ..utils.np_batch_op import np_batch_op from concurrent.futures import ThreadPoolExecutor
11,273
else: self.qb_affine_layer = StrictAffineLayer(input_size, self.n_qp, self.m_qp, self.obs_has_half_ref) if self.n_mlp_output > 0: self.mlp = mlp_builder(input_size, self.n_mlp_output) else: self.mlp = None # TODO: add preconditioner self.warm_starter = WarmStarter(device, n_qp, m_qp, fixed_P=shared_PH, fixed_H=shared_PH) if use_warm_starter else None self.warm_starter_delayed = WarmStarter(device, n_qp, m_qp, fixed_P=shared_PH, fixed_H=shared_PH) if use_warm_starter else None self.train_warm_starter = train_warm_starter self.ws_loss_coef = ws_loss_coef self.ws_update_rate = ws_update_rate self.ws_loss_shaper = ws_loss_shaper # P, H are fixed when the model is in test mode, and they are constant across all states (i.e., shared_PH == True) self.fixed_PH = is_test and shared_PH # Includes losses generated by the model itself (independent of interaction with env), e.g., warm starting & preconditioning self.autonomous_losses = {} self.mpc_baseline = mpc_baseline self.use_osqp_for_mpc = use_osqp_for_mpc self.imitate_mpc = imitate_mpc # Whether to consider residual loss during training - this can encourage feasibility of the learned QP problem self.use_residual_loss = use_residual_loss # Whether to force the problem to be feasible self.force_feasible = force_feasible self.feasible_lambda = feasible_lambda self.solver = None self.info = {} # Reserved for storing the controllers for each simulation instance when robust MPC is enabled self.robust_controllers = [] # Store info returned by env self.env_info = {} # When running batch testing, mask envs already done, to speed up computation (implemented for robust mpc); initialized at inference time since batch size is not known during initialization self.is_active = None def initialize_solver(self): # If the problem is forced to be feasible, the dimension of the solution is increased by 1 (introduce slack variable) n_qp_actual = self.n_qp + 1 if self.force_feasible else self.n_qp m_qp_actual = self.m_qp + 1 if self.force_feasible else self.m_qp # is_warm_starter_trainable is always False, since the warm starter is trained via another inference independent of the solver # When self.fixed_PH == True, the solver is initialized with fixed P, H matrices; otherwise, P, H are not passed to the solver during initialization time, but computed during the forward pass instead if not self.fixed_PH: self.solver = QPSolver(self.device, n_qp_actual, m_qp_actual, warm_starter=self.warm_starter_delayed, is_warm_starter_trainable=False, symmetric_constraint=self.symmetric, buffered=self.force_feasible) else: # Should be called after loading state dict Pinv, H = self.get_PH() self.solver = QPSolver(self.device, n_qp_actual, m_qp_actual, Pinv=Pinv.squeeze(0), H=H.squeeze(0), warm_starter=self.warm_starter_delayed, is_warm_starter_trainable=False, symmetric_constraint=self.symmetric, buffered=self.force_feasible) def compute_warm_starter_loss(self, q, b, Pinv, H, solver_Xs): qd, bd, Pinvd, Hd = map(lambda t: t.detach() if t is not None else None, [q, b, Pinv, H]) X0 = self.warm_starter(qd, bd, Pinvd, Hd) gt = solver_Xs[:, -1, :].detach() return self.ws_loss_coef * self.ws_loss_shaper(((gt - X0) ** 2).sum(dim=-1).mean()) def parallel_controller_creation(self, controller_creator, xref_np, bs): """ Create robust MPC controller in parallel """ # Helper function for parallel execution def task_creator(index): return controller_creator(self.mpc_baseline, xref_np[index, :]) with ThreadPoolExecutor() as executor: # Executing the tasks in parallel results = 
executor.map(task_creator, range(bs)) # Collecting the results self.robust_controllers.extend(results) def run_mpc_baseline(self, x, use_osqp_oracle=False): robust_method = self.mpc_baseline.get("robust_method", None) x0, xref = self.mpc_baseline["obs_to_state_and_ref"](x) bs = x.shape[0] # Conversions between torch and np t = lambda a: torch.tensor(a, device=x.device, dtype=torch.float) f = lambda t: t.detach().cpu().numpy() f_sparse = lambda t: scipy.sparse.csc_matrix(t.cpu().numpy()) if robust_method is None: # Run vanilla MPC without robustness eps = 1e-3 n, m, P, q, H, b = mpc2qp( self.mpc_baseline["n_mpc"], self.mpc_baseline["m_mpc"], self.mpc_baseline["N"], t(self.mpc_baseline["A"]), t(self.mpc_baseline["B"]), t(self.mpc_baseline["Q"]), t(self.mpc_baseline["R"]), self.mpc_baseline["x_min"] + eps, self.mpc_baseline["x_max"] - eps, self.mpc_baseline["u_min"], self.mpc_baseline["u_max"], x0, xref, normalize=self.mpc_baseline.get("normalize", False), Qf=self.mpc_baseline.get("terminal_coef", 0.) * t(np.eye(self.mpc_baseline["n_mpc"])) if self.mpc_baseline.get("Qf", None) is None else t(self.mpc_baseline["Qf"]), ) if not use_osqp_oracle: solver = QPSolver(x.device, n, m, P=P, H=H) Xs, primal_sols = solver(q, b, iters=100) sol = primal_sols[:, -1, :] else: osqp_oracle_with_iter_count = functools.partial(osqp_oracle, return_iter_count=True) if q.shape[0] > 1:
class StrictAffineLayer(nn.Module): """ Layer mapping from obs to (q, b) in the strict affine form. """ def __init__(self, input_size, n, m, obs_has_half_ref): super().__init__() self.obs_has_half_ref = obs_has_half_ref self.input_size = input_size self.q_layer = nn.Linear(self.input_size, n, bias=False) if not self.obs_has_half_ref: self.b_layer = nn.Linear(self.input_size // 2, m, bias=True) else: self.b_layer = nn.Linear(self.input_size, m, bias=True) def forward(self, x): if not self.obs_has_half_ref: x0 = x[:, :self.input_size // 2] else: x0 = x q = self.q_layer(x) b = self.b_layer(x0) return torch.cat([q, b], dim=1) class QPUnrolledNetwork(nn.Module): """ Learn a QP problem from the input using an MLP, then solve the QP using a fixed number of unrolled PDHG iterations. Form of QP: minimize (1/2)x'Px + q'x subject to Hx + b >= 0, where x in R^n, b in R^m. """ def __init__( self, device, input_size, n_qp, m_qp, qp_iter, mlp_builder, shared_PH=False, affine_qb=False, strict_affine_layer=False, obs_has_half_ref=False, symmetric=False, no_b=False, use_warm_starter=False, train_warm_starter=False, ws_loss_coef=1., ws_update_rate=0.01, ws_loss_shaper=lambda x: x ** (1 / 2), mpc_baseline=None, use_osqp_for_mpc=False, imitate_mpc=False, use_residual_loss=False, force_feasible=False, feasible_lambda=10, is_test=False, ): """mlp_builder is a function mapping (input_size, output_size) to a nn.Sequential object. If shared_PH == True, P and H are parameters independent of input, and q and b are functions of input; Otherwise, (P, H, q, b) are all functions of input. If affine_qb == True, then q and b are restricted to be affine functions of input. If strict_affine_layer == True (only effective when affine_qb=True), then: 1. q is linear w.r.t. (x0, xref) (no bias) 2. b is affine w.r.t. x0 (no dependence on xref) If obs_has_half_ref == True, the policy knows that the observation is in the form (x0, xref), with each taking up half of the dimension of the observation. If symmetric == True (only effective when affine_qb=True), then: 1. The bias terms are disabled in the modeling of q and b, i.e., q = Wq * x, b = Wb * x. 2. The constraint is assumed to be -1 <= Hx + b <= 1, instead of Hx + b >= 0. If no_b == True in addition to symmetric == True, then b is skipped altogether, i.e., the constraint is assumed to be -1 <= Hx <= 1. If mpc_baseline != None and imitate_mpc == False, then the forward function directly returns the solution of the MPC problem, instead of solving the learned QP problem. Can be used for benchmarking MPC. If mpc_baseline != None and imitate_mpc == True, then the forward function returns the solution of the learned QP problem, but a loss term is computed using the MPC problem. Can be used for supervised imitation learning. If force_feasible == True, solve the following problem instead of the original QP problem: minimize_{x,y} (1/2)x'Px + q'x + lambda * y^2 s.t. Hx + b + y * 1 >= 0, y >= 0, where x in R^n, y in R. In this case, the solution returned will be of dimension (n + 1). 
""" super().__init__() self.shared_PH = shared_PH self.affine_qb = affine_qb self.strict_affine_layer = strict_affine_layer self.obs_has_half_ref = obs_has_half_ref self.device = device self.input_size = input_size # QP dimensions: these are the number of variables and constraints WITHOUT considering the slack variable self.n_qp = n_qp self.m_qp = m_qp self.qp_iter = qp_iter self.symmetric = symmetric self.no_b = no_b self.n_P_param = n_qp * (n_qp + 1) // 2 self.n_q_param = n_qp self.n_H_param = m_qp * n_qp self.n_b_param = m_qp if not self.no_b else 0 self.n_mlp_output = 0 if not self.shared_PH: self.n_mlp_output += (self.n_P_param + self.n_H_param) self.P_params = None self.H_params = None else: self.P_params = nn.Parameter(torch.randn((self.n_P_param,), device=device)) self.H_params = nn.Parameter(torch.randn((self.n_H_param,), device=device)) if not self.affine_qb: self.n_mlp_output += (self.n_q_param + self.n_b_param) self.qb_affine_layer = None else: if not self.strict_affine_layer: self.qb_affine_layer = nn.Linear(input_size, self.n_q_param + self.n_b_param, bias=not self.symmetric) else: self.qb_affine_layer = StrictAffineLayer(input_size, self.n_qp, self.m_qp, self.obs_has_half_ref) if self.n_mlp_output > 0: self.mlp = mlp_builder(input_size, self.n_mlp_output) else: self.mlp = None # TODO: add preconditioner self.warm_starter = WarmStarter(device, n_qp, m_qp, fixed_P=shared_PH, fixed_H=shared_PH) if use_warm_starter else None self.warm_starter_delayed = WarmStarter(device, n_qp, m_qp, fixed_P=shared_PH, fixed_H=shared_PH) if use_warm_starter else None self.train_warm_starter = train_warm_starter self.ws_loss_coef = ws_loss_coef self.ws_update_rate = ws_update_rate self.ws_loss_shaper = ws_loss_shaper # P, H are fixed when the model is in test mode, and they are constant across all states (i.e., shared_PH == True) self.fixed_PH = is_test and shared_PH # Includes losses generated by the model itself (independent of interaction with env), e.g., warm starting & preconditioning self.autonomous_losses = {} self.mpc_baseline = mpc_baseline self.use_osqp_for_mpc = use_osqp_for_mpc self.imitate_mpc = imitate_mpc # Whether to consider residual loss during training - this can encourage feasibility of the learned QP problem self.use_residual_loss = use_residual_loss # Whether to force the problem to be feasible self.force_feasible = force_feasible self.feasible_lambda = feasible_lambda self.solver = None self.info = {} # Reserved for storing the controllers for each simulation instance when robust MPC is enabled self.robust_controllers = [] # Store info returned by env self.env_info = {} # When running batch testing, mask envs already done, to speed up computation (implemented for robust mpc); initialized at inference time since batch size is not known during initialization self.is_active = None def initialize_solver(self): # If the problem is forced to be feasible, the dimension of the solution is increased by 1 (introduce slack variable) n_qp_actual = self.n_qp + 1 if self.force_feasible else self.n_qp m_qp_actual = self.m_qp + 1 if self.force_feasible else self.m_qp # is_warm_starter_trainable is always False, since the warm starter is trained via another inference independent of the solver # When self.fixed_PH == True, the solver is initialized with fixed P, H matrices; otherwise, P, H are not passed to the solver during initialization time, but computed during the forward pass instead if not self.fixed_PH: self.solver = QPSolver(self.device, n_qp_actual, m_qp_actual, 
warm_starter=self.warm_starter_delayed, is_warm_starter_trainable=False, symmetric_constraint=self.symmetric, buffered=self.force_feasible) else: # Should be called after loading state dict Pinv, H = self.get_PH() self.solver = QPSolver(self.device, n_qp_actual, m_qp_actual, Pinv=Pinv.squeeze(0), H=H.squeeze(0), warm_starter=self.warm_starter_delayed, is_warm_starter_trainable=False, symmetric_constraint=self.symmetric, buffered=self.force_feasible) def compute_warm_starter_loss(self, q, b, Pinv, H, solver_Xs): qd, bd, Pinvd, Hd = map(lambda t: t.detach() if t is not None else None, [q, b, Pinv, H]) X0 = self.warm_starter(qd, bd, Pinvd, Hd) gt = solver_Xs[:, -1, :].detach() return self.ws_loss_coef * self.ws_loss_shaper(((gt - X0) ** 2).sum(dim=-1).mean()) def parallel_controller_creation(self, controller_creator, xref_np, bs): """ Create robust MPC controller in parallel """ # Helper function for parallel execution def task_creator(index): return controller_creator(self.mpc_baseline, xref_np[index, :]) with ThreadPoolExecutor() as executor: # Executing the tasks in parallel results = executor.map(task_creator, range(bs)) # Collecting the results self.robust_controllers.extend(results) def run_mpc_baseline(self, x, use_osqp_oracle=False): robust_method = self.mpc_baseline.get("robust_method", None) x0, xref = self.mpc_baseline["obs_to_state_and_ref"](x) bs = x.shape[0] # Conversions between torch and np t = lambda a: torch.tensor(a, device=x.device, dtype=torch.float) f = lambda t: t.detach().cpu().numpy() f_sparse = lambda t: scipy.sparse.csc_matrix(t.cpu().numpy()) if robust_method is None: # Run vanilla MPC without robustness eps = 1e-3 n, m, P, q, H, b = mpc2qp( self.mpc_baseline["n_mpc"], self.mpc_baseline["m_mpc"], self.mpc_baseline["N"], t(self.mpc_baseline["A"]), t(self.mpc_baseline["B"]), t(self.mpc_baseline["Q"]), t(self.mpc_baseline["R"]), self.mpc_baseline["x_min"] + eps, self.mpc_baseline["x_max"] - eps, self.mpc_baseline["u_min"], self.mpc_baseline["u_max"], x0, xref, normalize=self.mpc_baseline.get("normalize", False), Qf=self.mpc_baseline.get("terminal_coef", 0.) * t(np.eye(self.mpc_baseline["n_mpc"])) if self.mpc_baseline.get("Qf", None) is None else t(self.mpc_baseline["Qf"]), ) if not use_osqp_oracle: solver = QPSolver(x.device, n, m, P=P, H=H) Xs, primal_sols = solver(q, b, iters=100) sol = primal_sols[:, -1, :] else: osqp_oracle_with_iter_count = functools.partial(osqp_oracle, return_iter_count=True) if q.shape[0] > 1:
sol_np, iter_counts = np_batch_op(osqp_oracle_with_iter_count, f(q), f(b), f_sparse(P), f_sparse(H))
8
2023-11-28 05:56:22+00:00
16k
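The QPUnrolledNetwork docstring above fixes the QP form minimize (1/2)x'Px + q'x subject to Hx + b >= 0, and the snippet falls back to an `osqp_oracle` when not unrolling. A minimal sketch (not the repo's oracle) of solving that exact form with OSQP, mapping Hx + b >= 0 onto OSQP's l <= Ax <= u via A = H, l = -b, u = +inf; the toy P, q, H, b values below are made up:

import numpy as np
import scipy.sparse as sp
import osqp

P = sp.csc_matrix(np.diag([2.0, 1.0]))   # quadratic cost, must be positive semidefinite
q = np.array([-1.0, 0.5])                # linear cost
H = sp.csc_matrix(np.eye(2))             # constraint Hx + b >= 0; here simply x >= 0
b = np.array([0.0, 0.0])

prob = osqp.OSQP()
prob.setup(P=P, q=q, A=H, l=-b, u=np.full(b.shape, np.inf), verbose=False)
res = prob.solve()
print(res.x)  # minimizer of the toy QP, approximately [0.5, 0.0]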
Fraunhofer-SCAI/llamol
sample.py
[ { "identifier": "Transformer", "path": "model.py", "snippet": "class Transformer(nn.Module):\n last_loss: Optional[torch.Tensor]\n\n def __init__(self, params: ModelArgs, context_params: ContextArgs):\n super().__init__()\n self.params = params\n self.context_params = context_...
import os import sys import time import pandas as pd import torch import numpy as np import re import logging import argparse import rdkit.rdBase as rkrb import rdkit.RDLogger as rkl from contextlib import nullcontext from tqdm.auto import tqdm from model import Transformer from plot_utils import ( check_metrics, plot_1D_condition, plot_2D_condition, plot_3D_condition, plot_unconditional, ) from tokenizer import SmilesTokenizer from typing import Dict, List, Tuple, Union from rdkit import Chem from rdkit import DataStructs from rdkit.Chem.Fingerprints import FingerprintMols
11,145
# from tqdm.notebook import tqdm logger = logging.getLogger(__name__) class Sampler: def __init__( self, load_path: str, device: str = "cpu", seed: int = 1337, dtype: str = "float16", compile: bool = True, quantize: bool = False, ) -> None: self.load_path = load_path self.device = device self.dtype = dtype self.compile = compile self.quantize = quantize self.seed = seed self._init_model() def _init_model(self): np.random.seed(self.seed) torch.cuda.manual_seed(self.seed) torch.backends.cuda.matmul.allow_tf32 = True # allow tf32 on matmul torch.backends.cudnn.allow_tf32 = True # allow tf32 on cudnn self.device_type = ( "cuda" if "cuda" in self.device else "cpu" ) # for later use in torch.autocast ptdtype = { "float32": torch.float32, "bfloat16": torch.bfloat16, "float16": torch.float16, }[self.dtype] self.ptdtype = ptdtype self.ctx = self._autocast() # init from a model saved in a specific directory # ckpt_path = os.path.join(out_dir, "ckpt_full_dim=256.pt") self.model = Transformer.load(self.load_path, device=self.device) self.model.eval() if self.quantize: raise NotImplementedError("Not properly implemented for CPU / GPU") self.model = torch.ao.quantization.quantize_dynamic( self.model, # the original model {torch.nn.Linear}, # a set of layers to dynamically quantize dtype=torch.qint8, ) if self.compile: logger.info("Compiling the model...") self.model = torch.compile(self.model) # requires PyTorch 2.0 (optional) self.model = self.model.to(self.device) # load the tokenizer
# from tqdm.notebook import tqdm logger = logging.getLogger(__name__) class Sampler: def __init__( self, load_path: str, device: str = "cpu", seed: int = 1337, dtype: str = "float16", compile: bool = True, quantize: bool = False, ) -> None: self.load_path = load_path self.device = device self.dtype = dtype self.compile = compile self.quantize = quantize self.seed = seed self._init_model() def _init_model(self): np.random.seed(self.seed) torch.cuda.manual_seed(self.seed) torch.backends.cuda.matmul.allow_tf32 = True # allow tf32 on matmul torch.backends.cudnn.allow_tf32 = True # allow tf32 on cudnn self.device_type = ( "cuda" if "cuda" in self.device else "cpu" ) # for later use in torch.autocast ptdtype = { "float32": torch.float32, "bfloat16": torch.bfloat16, "float16": torch.float16, }[self.dtype] self.ptdtype = ptdtype self.ctx = self._autocast() # init from a model saved in a specific directory # ckpt_path = os.path.join(out_dir, "ckpt_full_dim=256.pt") self.model = Transformer.load(self.load_path, device=self.device) self.model.eval() if self.quantize: raise NotImplementedError("Not properly implemented for CPU / GPU") self.model = torch.ao.quantization.quantize_dynamic( self.model, # the original model {torch.nn.Linear}, # a set of layers to dynamically quantize dtype=torch.qint8, ) if self.compile: logger.info("Compiling the model...") self.model = torch.compile(self.model) # requires PyTorch 2.0 (optional) self.model = self.model.to(self.device) # load the tokenizer
self.tokenizer = SmilesTokenizer()
6
2023-11-28 09:50:31+00:00
16k
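The Sampler above guards a dynamic-quantization path behind its `quantize` flag (and raises because it is not properly implemented for CPU/GPU). A standalone sketch of the same `torch.ao.quantization.quantize_dynamic` call on a toy model; this path is CPU-only, and the layer sizes below are arbitrary:

import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 4)).eval()
# Replace Linear layers with dynamically quantized int8 versions (weights quantized,
# activations quantized on the fly at inference time)
qmodel = torch.ao.quantization.quantize_dynamic(model, {nn.Linear}, dtype=torch.qint8)
print(qmodel(torch.randn(1, 16)).shape)  # torch.Size([1, 4])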
lampmerchant/tashrouter
tashrouter/router/router.py
[ { "identifier": "RoutingTable", "path": "tashrouter/router/routing_table.py", "snippet": "class RoutingTable:\n '''A Router's routing table.'''\n \n STATE_GOOD = 1\n STATE_SUS = 2\n STATE_BAD = 3\n STATE_WORST = 4\n \n def __init__(self, router):\n self._router = router\n self._entry_by_ne...
import logging from .routing_table import RoutingTable from .zone_information_table import ZoneInformationTable from ..datagram import Datagram from ..service.echo import EchoService from ..service.name_information import NameInformationService from ..service.routing_table_aging import RoutingTableAgingService from ..service.rtmp.responding import RtmpRespondingService from ..service.rtmp.sending import RtmpSendingService from ..service.zip.responding import ZipRespondingService from ..service.zip.sending import ZipSendingService
12,099
'''The heart of this whole affair.''' class Router: '''A router, a device which sends Datagrams to Ports and runs Services.''' def __init__(self, short_str, ports): self._short_str = short_str self.ports = ports self._services = ( (EchoService.ECHO_SAS, EchoService()), (NameInformationService.NBP_SAS, NameInformationService()), (None, RoutingTableAgingService()), (RtmpRespondingService.RTMP_SAS, RtmpRespondingService()), (None, RtmpSendingService()), (ZipRespondingService.ZIP_SAS, ZipRespondingService()), (None, ZipSendingService()), ) self.zone_information_table = ZoneInformationTable(self) self._services_by_sas = {} for sas, service in self._services: if sas is not None: self._services_by_sas[sas] = service
'''The heart of this whole affair.''' class Router: '''A router, a device which sends Datagrams to Ports and runs Services.''' def __init__(self, short_str, ports): self._short_str = short_str self.ports = ports self._services = ( (EchoService.ECHO_SAS, EchoService()), (NameInformationService.NBP_SAS, NameInformationService()), (None, RoutingTableAgingService()), (RtmpRespondingService.RTMP_SAS, RtmpRespondingService()), (None, RtmpSendingService()), (ZipRespondingService.ZIP_SAS, ZipRespondingService()), (None, ZipSendingService()), ) self.zone_information_table = ZoneInformationTable(self) self._services_by_sas = {} for sas, service in self._services: if sas is not None: self._services_by_sas[sas] = service
self.routing_table = RoutingTable(self)
0
2023-12-02 15:17:07+00:00
16k
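Router above keys its services by statically assigned socket (`_services_by_sas`) and skips entries whose SAS is None. Illustrative only: a stripped-down sketch of that dict-dispatch pattern, with stand-in socket numbers and print handlers rather than the repo's real service classes:

# Stand-in socket numbers; the real values come from the service classes
# (e.g. EchoService.ECHO_SAS, NameInformationService.NBP_SAS).
services_by_sas = {
    4: lambda dg: print("echo service got", dg),
    2: lambda dg: print("name service got", dg),
}

def inbound(datagram, destination_socket):
    handler = services_by_sas.get(destination_socket)
    if handler is not None:
        handler(datagram)  # deliver to the service bound to this socket

inbound("ping", 4)  # routed to the echo handler; unknown sockets are dropped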
andryyy/ehlocomputer
models/listeners.py
[ { "identifier": "defaults", "path": "config/defaults.py", "snippet": "ACCEPT_LANGUAGES = [\"en\", \"de\"]\nMAX_HISTORIC_REVISIONS = 5\nWEBAUTHN_CHALLENGE_TIMEOUT = 30 # seconds\nPROXY_AUTH_TIMEOUT = 300 # seconds\nTABLE_PAGE_SIZE = 10\nTINYDB = {\n \"storage\": RedisLockMiddleware(JSONStorage),\n ...
import json import os import re import uuid from config import defaults from config import lego from config.database import * from email_validator import validate_email from pydantic import ( AfterValidator, BaseModel, EmailStr, Field, FilePath, HttpUrl, field_validator, model_validator, validator, ) from pydantic.networks import IPv4Address, IPv6Address from typing import Annotated, Any, Literal from . import ( utc_now_as_str, ensure_list, to_unique_sorted_str_list, get_validated_fqdn, flatten, )
12,793
class ListenerCreate(BaseModel): id: Annotated[str, Field(default_factory=lambda: str(uuid.uuid4()))] name: Annotated[str, Field(min_length=1)] configuration: dict = {} historic: list = [] created: Annotated[str, Field(default_factory=utc_now_as_str)] updated: Annotated[str, Field(default_factory=utc_now_as_str)] class ListenerLegoConfig(BaseModel): lego_provider: str acme_terms_agreed: Literal[True, "true"] provider_config: dict acme_server: Annotated[str, AfterValidator(lambda x: str(HttpUrl(x)))] acme_email: EmailStr key_type: Literal["EC256", "EC384", "RSA2048", "RSA4096", "RSA8192"] = "RSA2048" domains: str @model_validator(mode="before") @classmethod def check_lego(self, data: Any) -> Any: if data.get("lego_provider") not in lego.LEGO_DNS_PROVIDERS.keys(): raise ValueError( f"Value {data.get('lego_provider')} is not a lego provider" ) _envs_available = flatten( [ list(p.keys()) for p in lego.LEGO_DNS_PROVIDERS.get(data.get("lego_provider"), []) ] ) for _k, _v in data.get("provider_config").items(): if _k not in _envs_available: raise ValueError( f"{_k} is not a valid environment variable for the given lego DNS client" ) if not isinstance(_v, str): raise ValueError(f"Value of {_k} is not a string") return data class ListenerServerListener(BaseModel): hostname: Annotated[ str,
class ListenerCreate(BaseModel): id: Annotated[str, Field(default_factory=lambda: str(uuid.uuid4()))] name: Annotated[str, Field(min_length=1)] configuration: dict = {} historic: list = [] created: Annotated[str, Field(default_factory=utc_now_as_str)] updated: Annotated[str, Field(default_factory=utc_now_as_str)] class ListenerLegoConfig(BaseModel): lego_provider: str acme_terms_agreed: Literal[True, "true"] provider_config: dict acme_server: Annotated[str, AfterValidator(lambda x: str(HttpUrl(x)))] acme_email: EmailStr key_type: Literal["EC256", "EC384", "RSA2048", "RSA4096", "RSA8192"] = "RSA2048" domains: str @model_validator(mode="before") @classmethod def check_lego(self, data: Any) -> Any: if data.get("lego_provider") not in lego.LEGO_DNS_PROVIDERS.keys(): raise ValueError( f"Value {data.get('lego_provider')} is not a lego provider" ) _envs_available = flatten( [ list(p.keys()) for p in lego.LEGO_DNS_PROVIDERS.get(data.get("lego_provider"), []) ] ) for _k, _v in data.get("provider_config").items(): if _k not in _envs_available: raise ValueError( f"{_k} is not a valid environment variable for the given lego DNS client" ) if not isinstance(_v, str): raise ValueError(f"Value of {_k} is not a string") return data class ListenerServerListener(BaseModel): hostname: Annotated[ str,
AfterValidator(lambda x: get_validated_fqdn(x)),
5
2023-12-01 08:36:45+00:00
16k
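ListenerLegoConfig above relies on a pydantic v2 `model_validator(mode="before")` to vet the raw input dict before field parsing. A minimal sketch of that pattern, with a made-up provider allow-list standing in for the repo's lego tables:

from typing import Any
from pydantic import BaseModel, model_validator

ALLOWED_PROVIDERS = {"cloudflare", "route53"}  # hypothetical stand-in for lego.LEGO_DNS_PROVIDERS

class LegoConfig(BaseModel):
    lego_provider: str

    @model_validator(mode="before")
    @classmethod
    def check_lego(cls, data: Any) -> Any:
        # runs on the raw dict, before field validation
        if data.get("lego_provider") not in ALLOWED_PROVIDERS:
            raise ValueError(f"Value {data.get('lego_provider')} is not a lego provider")
        return data

LegoConfig(lego_provider="cloudflare")  # ok; an unknown provider would raise ValueError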
fzmi/ubdd
models/dino/models/dino/dino.py
[ { "identifier": "box_ops", "path": "models/dino/util/box_ops.py", "snippet": "def box_cxcywh_to_xyxy(x):\ndef box_xyxy_to_cxcywh(x):\ndef box_iou(boxes1, boxes2):\ndef generalized_box_iou(boxes1, boxes2):\ndef box_iou_pairwise(boxes1, boxes2):\ndef generalized_box_iou_pairwise(boxes1, boxes2):\ndef mask...
import copy import math import torch import torch.nn.functional as F from typing import List from torch import nn from torchvision.ops.boxes import nms from models.dino.util import box_ops from models.dino.util.misc import (NestedTensor, nested_tensor_from_tensor_list, accuracy, get_world_size, interpolate, is_dist_avail_and_initialized, inverse_sigmoid) from .backbone import build_backbone from .matcher import build_matcher from .segmentation import (DETRsegm, PostProcessPanoptic, PostProcessSegm, dice_loss) from .deformable_transformer import build_deformable_transformer from .utils import sigmoid_focal_loss, MLP from ..registry import MODULE_BUILD_FUNCS from .dn_components import prepare_for_cdn,dn_post_process
11,759
if log: # TODO this should probably be a separate loss, not hacked in this one here losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0] return losses @torch.no_grad() def loss_cardinality(self, outputs, targets, indices, num_boxes): """ Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients """ pred_logits = outputs['pred_logits'] device = pred_logits.device tgt_lengths = torch.as_tensor([len(v["labels"]) for v in targets], device=device) # Count the number of predictions that are NOT "no-object" (which is the last class) card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1) card_err = F.l1_loss(card_pred.float(), tgt_lengths.float()) losses = {'cardinality_error': card_err} return losses def loss_boxes(self, outputs, targets, indices, num_boxes): """Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4] The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size. """ assert 'pred_boxes' in outputs idx = self._get_src_permutation_idx(indices) src_boxes = outputs['pred_boxes'][idx] target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0) loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none') losses = {} losses['loss_bbox'] = loss_bbox.sum() / num_boxes loss_giou = 1 - torch.diag(box_ops.generalized_box_iou( box_ops.box_cxcywh_to_xyxy(src_boxes), box_ops.box_cxcywh_to_xyxy(target_boxes))) losses['loss_giou'] = loss_giou.sum() / num_boxes # calculate the x,y and h,w loss with torch.no_grad(): losses['loss_xy'] = loss_bbox[..., :2].sum() / num_boxes losses['loss_hw'] = loss_bbox[..., 2:].sum() / num_boxes return losses def loss_masks(self, outputs, targets, indices, num_boxes): """Compute the losses related to the masks: the focal loss and the dice loss. 
targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w] """ assert "pred_masks" in outputs src_idx = self._get_src_permutation_idx(indices) tgt_idx = self._get_tgt_permutation_idx(indices) src_masks = outputs["pred_masks"] src_masks = src_masks[src_idx] masks = [t["masks"] for t in targets] # TODO use valid to mask invalid areas due to padding in loss target_masks, valid = nested_tensor_from_tensor_list(masks).decompose() target_masks = target_masks.to(src_masks) target_masks = target_masks[tgt_idx] # upsample predictions to the target size src_masks = interpolate(src_masks[:, None], size=target_masks.shape[-2:], mode="bilinear", align_corners=False) src_masks = src_masks[:, 0].flatten(1) target_masks = target_masks.flatten(1) target_masks = target_masks.view(src_masks.shape) losses = { "loss_mask": sigmoid_focal_loss(src_masks, target_masks, num_boxes), "loss_dice": dice_loss(src_masks, target_masks, num_boxes), } return losses def _get_src_permutation_idx(self, indices): # permute predictions following indices batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)]) src_idx = torch.cat([src for (src, _) in indices]) return batch_idx, src_idx def _get_tgt_permutation_idx(self, indices): # permute targets following indices batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)]) tgt_idx = torch.cat([tgt for (_, tgt) in indices]) return batch_idx, tgt_idx def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs): loss_map = { 'labels': self.loss_labels, 'cardinality': self.loss_cardinality, 'boxes': self.loss_boxes, 'masks': self.loss_masks, } assert loss in loss_map, f'do you really want to compute {loss} loss?' return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs) def forward(self, outputs, targets, return_indices=False): """ This performs the loss computation. Parameters: outputs: dict of tensors, see the output specification of the model for the format targets: list of dicts, such that len(targets) == batch_size. The expected keys in each dict depends on the losses applied, see each loss' doc return_indices: used for vis. if True, the layer0-5 indices will be returned as well. """ outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'} device=next(iter(outputs.values())).device indices = self.matcher(outputs_without_aux, targets) if return_indices: indices0_copy = indices indices_list = [] # Compute the average number of target boxes across all nodes, for normalization purposes num_boxes = sum(len(t["labels"]) for t in targets) num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=device)
# ------------------------------------------------------------------------ # DINO # Copyright (c) 2022 IDEA. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Conditional DETR model and criterion classes. # Copyright (c) 2021 Microsoft. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Modified from DETR (https://github.com/facebookresearch/detr) # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. # ------------------------------------------------------------------------ # Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR) # Copyright (c) 2020 SenseTime. All Rights Reserved. # ------------------------------------------------------------------------ class DINO(nn.Module): """ This is the Cross-Attention Detector module that performs object detection """ def __init__(self, backbone, transformer, num_classes, num_queries, aux_loss=False, iter_update=False, query_dim=2, random_refpoints_xy=False, fix_refpoints_hw=-1, num_feature_levels=1, nheads=8, # two stage two_stage_type='no', # ['no', 'standard'] two_stage_add_query_num=0, dec_pred_class_embed_share=True, dec_pred_bbox_embed_share=True, two_stage_class_embed_share=True, two_stage_bbox_embed_share=True, decoder_sa_type = 'sa', num_patterns = 0, dn_number = 100, dn_box_noise_scale = 0.4, dn_label_noise_ratio = 0.5, dn_labelbook_size = 100, ): """ Initializes the model. Parameters: backbone: torch module of the backbone to be used. See backbone.py transformer: torch module of the transformer architecture. See transformer.py num_classes: number of object classes num_queries: number of object queries, ie detection slot. This is the maximal number of objects Conditional DETR can detect in a single image. For COCO, we recommend 100 queries. aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used. 
fix_refpoints_hw: -1(default): learn w and h for each box separately >0 : given fixed number -2 : learn a shared w and h """ super().__init__() self.num_queries = num_queries self.transformer = transformer self.num_classes = num_classes self.hidden_dim = hidden_dim = transformer.d_model self.num_feature_levels = num_feature_levels self.nheads = nheads self.label_enc = nn.Embedding(dn_labelbook_size + 1, hidden_dim) # setting query dim self.query_dim = query_dim assert query_dim == 4 self.random_refpoints_xy = random_refpoints_xy self.fix_refpoints_hw = fix_refpoints_hw # for dn training self.num_patterns = num_patterns self.dn_number = dn_number self.dn_box_noise_scale = dn_box_noise_scale self.dn_label_noise_ratio = dn_label_noise_ratio self.dn_labelbook_size = dn_labelbook_size # prepare input projection layers if num_feature_levels > 1: num_backbone_outs = len(backbone.num_channels) input_proj_list = [] for _ in range(num_backbone_outs): in_channels = backbone.num_channels[_] input_proj_list.append(nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), )) for _ in range(num_feature_levels - num_backbone_outs): input_proj_list.append(nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1), nn.GroupNorm(32, hidden_dim), )) in_channels = hidden_dim self.input_proj = nn.ModuleList(input_proj_list) else: assert two_stage_type == 'no', "two_stage_type should be no if num_feature_levels=1 !!!" self.input_proj = nn.ModuleList([ nn.Sequential( nn.Conv2d(backbone.num_channels[-1], hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), )]) self.backbone = backbone self.aux_loss = aux_loss self.box_pred_damping = box_pred_damping = None self.iter_update = iter_update assert iter_update, "Why not iter_update?" 
# prepare pred layers self.dec_pred_class_embed_share = dec_pred_class_embed_share self.dec_pred_bbox_embed_share = dec_pred_bbox_embed_share # prepare class & box embed _class_embed = nn.Linear(hidden_dim, num_classes) _bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3) # init the two embed layers prior_prob = 0.01 bias_value = -math.log((1 - prior_prob) / prior_prob) _class_embed.bias.data = torch.ones(self.num_classes) * bias_value nn.init.constant_(_bbox_embed.layers[-1].weight.data, 0) nn.init.constant_(_bbox_embed.layers[-1].bias.data, 0) if dec_pred_bbox_embed_share: box_embed_layerlist = [_bbox_embed for i in range(transformer.num_decoder_layers)] else: box_embed_layerlist = [copy.deepcopy(_bbox_embed) for i in range(transformer.num_decoder_layers)] if dec_pred_class_embed_share: class_embed_layerlist = [_class_embed for i in range(transformer.num_decoder_layers)] else: class_embed_layerlist = [copy.deepcopy(_class_embed) for i in range(transformer.num_decoder_layers)] self.bbox_embed = nn.ModuleList(box_embed_layerlist) self.class_embed = nn.ModuleList(class_embed_layerlist) self.transformer.decoder.bbox_embed = self.bbox_embed self.transformer.decoder.class_embed = self.class_embed # two stage self.two_stage_type = two_stage_type self.two_stage_add_query_num = two_stage_add_query_num assert two_stage_type in ['no', 'standard'], "unknown param {} of two_stage_type".format(two_stage_type) if two_stage_type != 'no': if two_stage_bbox_embed_share: assert dec_pred_class_embed_share and dec_pred_bbox_embed_share self.transformer.enc_out_bbox_embed = _bbox_embed else: self.transformer.enc_out_bbox_embed = copy.deepcopy(_bbox_embed) if two_stage_class_embed_share: assert dec_pred_class_embed_share and dec_pred_bbox_embed_share self.transformer.enc_out_class_embed = _class_embed else: self.transformer.enc_out_class_embed = copy.deepcopy(_class_embed) self.refpoint_embed = None if self.two_stage_add_query_num > 0: self.init_ref_points(two_stage_add_query_num) self.decoder_sa_type = decoder_sa_type assert decoder_sa_type in ['sa', 'ca_label', 'ca_content'] if decoder_sa_type == 'ca_label': self.label_embedding = nn.Embedding(num_classes, hidden_dim) for layer in self.transformer.decoder.layers: layer.label_embedding = self.label_embedding else: for layer in self.transformer.decoder.layers: layer.label_embedding = None self.label_embedding = None self._reset_parameters() def _reset_parameters(self): # init input_proj for proj in self.input_proj: nn.init.xavier_uniform_(proj[0].weight, gain=1) nn.init.constant_(proj[0].bias, 0) def init_ref_points(self, use_num_queries): self.refpoint_embed = nn.Embedding(use_num_queries, self.query_dim) if self.random_refpoints_xy: self.refpoint_embed.weight.data[:, :2].uniform_(0,1) self.refpoint_embed.weight.data[:, :2] = inverse_sigmoid(self.refpoint_embed.weight.data[:, :2]) self.refpoint_embed.weight.data[:, :2].requires_grad = False if self.fix_refpoints_hw > 0: print("fix_refpoints_hw: {}".format(self.fix_refpoints_hw)) assert self.random_refpoints_xy self.refpoint_embed.weight.data[:, 2:] = self.fix_refpoints_hw self.refpoint_embed.weight.data[:, 2:] = inverse_sigmoid(self.refpoint_embed.weight.data[:, 2:]) self.refpoint_embed.weight.data[:, 2:].requires_grad = False elif int(self.fix_refpoints_hw) == -1: pass elif int(self.fix_refpoints_hw) == -2: print('learn a shared h and w') assert self.random_refpoints_xy self.refpoint_embed = nn.Embedding(use_num_queries, 2) self.refpoint_embed.weight.data[:, :2].uniform_(0,1) self.refpoint_embed.weight.data[:, 
:2] = inverse_sigmoid(self.refpoint_embed.weight.data[:, :2]) self.refpoint_embed.weight.data[:, :2].requires_grad = False self.hw_embed = nn.Embedding(1, 1) else: raise NotImplementedError('Unknown fix_refpoints_hw {}'.format(self.fix_refpoints_hw)) def forward(self, samples: NestedTensor, targets:List=None): """ The forward expects a NestedTensor, which consists of: - samples.tensor: batched images, of shape [batch_size x 3 x H x W] - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels It returns a dict with the following elements: - "pred_logits": the classification logits (including no-object) for all queries. Shape= [batch_size x num_queries x num_classes] - "pred_boxes": The normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image (disregarding possible padding). See PostProcess for information on how to retrieve the unnormalized bounding box. - "aux_outputs": Optional, only returned when auxiliary losses are activated. It is a list of dictionaries containing the two above keys for each decoder layer. """ if isinstance(samples, (list, torch.Tensor)): samples = nested_tensor_from_tensor_list(samples) features, poss = self.backbone(samples) srcs = [] masks = [] for l, feat in enumerate(features): src, mask = feat.decompose() srcs.append(self.input_proj[l](src)) masks.append(mask) assert mask is not None if self.num_feature_levels > len(srcs): _len_srcs = len(srcs) for l in range(_len_srcs, self.num_feature_levels): if l == _len_srcs: src = self.input_proj[l](features[-1].tensors) else: src = self.input_proj[l](srcs[-1]) m = samples.mask mask = F.interpolate(m[None].float(), size=src.shape[-2:]).to(torch.bool)[0] pos_l = self.backbone[1](NestedTensor(src, mask)).to(src.dtype) srcs.append(src) masks.append(mask) poss.append(pos_l) if self.dn_number > 0 or targets is not None: input_query_label, input_query_bbox, attn_mask, dn_meta =\ prepare_for_cdn(dn_args=(targets, self.dn_number, self.dn_label_noise_ratio, self.dn_box_noise_scale), training=self.training,num_queries=self.num_queries,num_classes=self.num_classes, hidden_dim=self.hidden_dim,label_enc=self.label_enc) else: assert targets is None input_query_bbox = input_query_label = attn_mask = dn_meta = None hs, reference, hs_enc, ref_enc, init_box_proposal = self.transformer(srcs, masks, input_query_bbox, poss,input_query_label,attn_mask) # In case num object=0 hs[0] += self.label_enc.weight[0,0]*0.0 # deformable-detr-like anchor update # reference_before_sigmoid = inverse_sigmoid(reference[:-1]) # n_dec, bs, nq, 4 outputs_coord_list = [] for dec_lid, (layer_ref_sig, layer_bbox_embed, layer_hs) in enumerate(zip(reference[:-1], self.bbox_embed, hs)): layer_delta_unsig = layer_bbox_embed(layer_hs) layer_outputs_unsig = layer_delta_unsig + inverse_sigmoid(layer_ref_sig) layer_outputs_unsig = layer_outputs_unsig.sigmoid() outputs_coord_list.append(layer_outputs_unsig) outputs_coord_list = torch.stack(outputs_coord_list) outputs_class = torch.stack([layer_cls_embed(layer_hs) for layer_cls_embed, layer_hs in zip(self.class_embed, hs)]) if self.dn_number > 0 and dn_meta is not None: outputs_class, outputs_coord_list = \ dn_post_process(outputs_class, outputs_coord_list, dn_meta,self.aux_loss,self._set_aux_loss) out = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord_list[-1]} if self.aux_loss: out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord_list) # 
for encoder output if hs_enc is not None: # prepare intermediate outputs interm_coord = ref_enc[-1] interm_class = self.transformer.enc_out_class_embed(hs_enc[-1]) out['interm_outputs'] = {'pred_logits': interm_class, 'pred_boxes': interm_coord} out['interm_outputs_for_matching_pre'] = {'pred_logits': interm_class, 'pred_boxes': init_box_proposal} # prepare enc outputs if hs_enc.shape[0] > 1: enc_outputs_coord = [] enc_outputs_class = [] for layer_id, (layer_box_embed, layer_class_embed, layer_hs_enc, layer_ref_enc) in enumerate(zip(self.enc_bbox_embed, self.enc_class_embed, hs_enc[:-1], ref_enc[:-1])): layer_enc_delta_unsig = layer_box_embed(layer_hs_enc) layer_enc_outputs_coord_unsig = layer_enc_delta_unsig + inverse_sigmoid(layer_ref_enc) layer_enc_outputs_coord = layer_enc_outputs_coord_unsig.sigmoid() layer_enc_outputs_class = layer_class_embed(layer_hs_enc) enc_outputs_coord.append(layer_enc_outputs_coord) enc_outputs_class.append(layer_enc_outputs_class) out['enc_outputs'] = [ {'pred_logits': a, 'pred_boxes': b} for a, b in zip(enc_outputs_class, enc_outputs_coord) ] out['dn_meta'] = dn_meta return out @torch.jit.unused def _set_aux_loss(self, outputs_class, outputs_coord): # this is a workaround to make torchscript happy, as torchscript # doesn't support dictionary with non-homogeneous values, such # as a dict having both a Tensor and a list. return [{'pred_logits': a, 'pred_boxes': b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])] class SetCriterion(nn.Module): """ This class computes the loss for Conditional DETR. The process happens in two steps: 1) we compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of matched ground-truth / prediction (supervise class and box) """ def __init__(self, num_classes, matcher, weight_dict, focal_alpha, losses): """ Create the criterion. Parameters: num_classes: number of object categories, omitting the special no-object category matcher: module able to compute a matching between targets and proposals weight_dict: dict containing as key the names of the losses and as values their relative weight. losses: list of all the losses to be applied. See get_loss for list of available losses. 
focal_alpha: alpha in Focal Loss """ super().__init__() self.num_classes = num_classes self.matcher = matcher self.weight_dict = weight_dict self.losses = losses self.focal_alpha = focal_alpha def loss_labels(self, outputs, targets, indices, num_boxes, log=True): """Classification loss (Binary focal loss) targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes] """ assert 'pred_logits' in outputs src_logits = outputs['pred_logits'] idx = self._get_src_permutation_idx(indices) target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)]) target_classes = torch.full(src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device) target_classes[idx] = target_classes_o target_classes_onehot = torch.zeros([src_logits.shape[0], src_logits.shape[1], src_logits.shape[2]+1], dtype=src_logits.dtype, layout=src_logits.layout, device=src_logits.device) target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1) target_classes_onehot = target_classes_onehot[:,:,:-1] loss_ce = sigmoid_focal_loss(src_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * src_logits.shape[1] losses = {'loss_ce': loss_ce} if log: # TODO this should probably be a separate loss, not hacked in this one here losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0] return losses @torch.no_grad() def loss_cardinality(self, outputs, targets, indices, num_boxes): """ Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients """ pred_logits = outputs['pred_logits'] device = pred_logits.device tgt_lengths = torch.as_tensor([len(v["labels"]) for v in targets], device=device) # Count the number of predictions that are NOT "no-object" (which is the last class) card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1) card_err = F.l1_loss(card_pred.float(), tgt_lengths.float()) losses = {'cardinality_error': card_err} return losses def loss_boxes(self, outputs, targets, indices, num_boxes): """Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4] The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size. """ assert 'pred_boxes' in outputs idx = self._get_src_permutation_idx(indices) src_boxes = outputs['pred_boxes'][idx] target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0) loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none') losses = {} losses['loss_bbox'] = loss_bbox.sum() / num_boxes loss_giou = 1 - torch.diag(box_ops.generalized_box_iou( box_ops.box_cxcywh_to_xyxy(src_boxes), box_ops.box_cxcywh_to_xyxy(target_boxes))) losses['loss_giou'] = loss_giou.sum() / num_boxes # calculate the x,y and h,w loss with torch.no_grad(): losses['loss_xy'] = loss_bbox[..., :2].sum() / num_boxes losses['loss_hw'] = loss_bbox[..., 2:].sum() / num_boxes return losses def loss_masks(self, outputs, targets, indices, num_boxes): """Compute the losses related to the masks: the focal loss and the dice loss. 
targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w] """ assert "pred_masks" in outputs src_idx = self._get_src_permutation_idx(indices) tgt_idx = self._get_tgt_permutation_idx(indices) src_masks = outputs["pred_masks"] src_masks = src_masks[src_idx] masks = [t["masks"] for t in targets] # TODO use valid to mask invalid areas due to padding in loss target_masks, valid = nested_tensor_from_tensor_list(masks).decompose() target_masks = target_masks.to(src_masks) target_masks = target_masks[tgt_idx] # upsample predictions to the target size src_masks = interpolate(src_masks[:, None], size=target_masks.shape[-2:], mode="bilinear", align_corners=False) src_masks = src_masks[:, 0].flatten(1) target_masks = target_masks.flatten(1) target_masks = target_masks.view(src_masks.shape) losses = { "loss_mask": sigmoid_focal_loss(src_masks, target_masks, num_boxes), "loss_dice": dice_loss(src_masks, target_masks, num_boxes), } return losses def _get_src_permutation_idx(self, indices): # permute predictions following indices batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)]) src_idx = torch.cat([src for (src, _) in indices]) return batch_idx, src_idx def _get_tgt_permutation_idx(self, indices): # permute targets following indices batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)]) tgt_idx = torch.cat([tgt for (_, tgt) in indices]) return batch_idx, tgt_idx def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs): loss_map = { 'labels': self.loss_labels, 'cardinality': self.loss_cardinality, 'boxes': self.loss_boxes, 'masks': self.loss_masks, } assert loss in loss_map, f'do you really want to compute {loss} loss?' return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs) def forward(self, outputs, targets, return_indices=False): """ This performs the loss computation. Parameters: outputs: dict of tensors, see the output specification of the model for the format targets: list of dicts, such that len(targets) == batch_size. The expected keys in each dict depends on the losses applied, see each loss' doc return_indices: used for vis. if True, the layer0-5 indices will be returned as well. """ outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'} device=next(iter(outputs.values())).device indices = self.matcher(outputs_without_aux, targets) if return_indices: indices0_copy = indices indices_list = [] # Compute the average number of target boxes across all nodes, for normalization purposes num_boxes = sum(len(t["labels"]) for t in targets) num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=device)
if is_dist_avail_and_initialized():
6
2023-12-04 00:27:58+00:00
16k
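loss_boxes in the row above computes its GIoU term as 1 - diag(generalized_box_iou(...)) over matched box pairs converted from cxcywh to xyxy. A sketch of the same computation using torchvision's stock ops in place of the repo's box_ops (same math, toy boxes):

import torch
from torchvision.ops import generalized_box_iou, box_convert

src = torch.tensor([[0.5, 0.5, 0.4, 0.4]])  # matched predictions, cxcywh, normalized
tgt = torch.tensor([[0.5, 0.5, 0.5, 0.5]])  # matched targets, cxcywh, normalized
giou = generalized_box_iou(
    box_convert(src, "cxcywh", "xyxy"),
    box_convert(tgt, "cxcywh", "xyxy"),
)
# diag picks each prediction's GIoU against its own matched target
loss_giou = (1 - torch.diag(giou)).sum() / len(tgt)  # normalized by num_boxes
print(loss_giou)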
girgle/DouZero_For_New_HLDDZ
GOOD.py
[ { "identifier": "GameHelper", "path": "GameHelper.py", "snippet": "class GameHelper:\n def __init__(self):\n self.ScreenZoomRate = None\n self.counter = QTime()\n self.Pics = {}\n self.PicsCV = {}\n st = time.time()\n self.Handle = win32gui.FindWindow(\"Unity...
import GameHelper as gh import os import sys import time import threading import pyautogui import win32gui import multiprocessing as mp import DetermineColor as DC import cv2 import numpy as np import traceback import BidModel import LandlordModel import FarmerModel from GameHelper import GameHelper from PIL import Image from skimage.metrics import structural_similarity as ssim from collections import defaultdict from douzero.env.move_detector import get_move_type from PyQt5 import QtGui, QtWidgets, QtCore from PyQt5.QtWidgets import QTableWidgetItem, QInputDialog, QMessageBox from PyQt5.QtGui import QPixmap, QIcon from PyQt5.QtCore import QTime, QEventLoop, Qt from MainWindow import Ui_Form from douzero.env.game import GameEnv from douzero.evaluation.deep_agent import DeepAgent
12,150
# -*- coding: utf-8 -*- # Created by: Raf # Modify by: Vincentzyx EnvCard2RealCard = {3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8', 9: '9', 10: 'T', 11: 'J', 12: 'Q', 13: 'K', 14: 'A', 17: '2', 20: 'X', 30: 'D'} RealCard2EnvCard = {'3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, 'T': 10, 'J': 11, 'Q': 12, 'K': 13, 'A': 14, '2': 17, 'X': 20, 'D': 30} AllEnvCard = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30] AllCards = ['D', 'X', '2', 'A', 'K', 'Q', 'J', 'T', '9', '8', '7', '6', '5', '4', '3'] helper = GameHelper()
# -*- coding: utf-8 -*- # Created by: Raf # Modify by: Vincentzyx EnvCard2RealCard = {3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8', 9: '9', 10: 'T', 11: 'J', 12: 'Q', 13: 'K', 14: 'A', 17: '2', 20: 'X', 30: 'D'} RealCard2EnvCard = {'3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, 'T': 10, 'J': 11, 'Q': 12, 'K': 13, 'A': 14, '2': 17, 'X': 20, 'D': 30} AllEnvCard = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30] AllCards = ['D', 'X', '2', 'A', 'K', 'Q', 'J', 'T', '9', '8', '7', '6', '5', '4', '3'] helper = GameHelper()
class MyPyQT_Form(QtWidgets.QWidget, Ui_Form):
2
2023-12-01 04:04:30+00:00
16k
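The card tables at the top of this row define the integer/character encodings the bot shares with the DouZero env. A tiny usage sketch restating those mappings and round-tripping a hypothetical hand:

EnvCard2RealCard = {3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8', 9: '9',
                    10: 'T', 11: 'J', 12: 'Q', 13: 'K', 14: 'A', 17: '2',
                    20: 'X', 30: 'D'}
RealCard2EnvCard = {v: k for k, v in EnvCard2RealCard.items()}  # inverse table

hand = [3, 3, 10, 14, 30]                          # hypothetical hand in env encoding
s = ''.join(EnvCard2RealCard[c] for c in hand)     # "33TAD"
assert [RealCard2EnvCard[ch] for ch in s] == hand  # lossless round trip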
yongzhuo/MacroGPT-Pretrain
macro_gpt/ft_gpt/train.pt.py
[ { "identifier": "CUDA_VISIBLE_DEVICES", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "CUDA_VISIBLE_DEVICES = \"0\"" }, { "identifier": "USE_TORCH", "path": "macro_gpt/ft_gpt/config_macrogpt_1b3_float32.py", "snippet": "USE_TORCH = \"1\"" }, { "identifier"...
import random import copy import sys import os import bitsandbytes as bnb import torch.nn as nn import transformers import torch from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import CUDA_VISIBLE_DEVICES, USE_TORCH, CPU_NUMS # from config from transformers.models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from peft import (get_peft_model_state_dict, get_peft_model, LoraConfig) from transformers import AutoModelForCausalLM, AutoTokenizer from transformers.modeling_utils import unwrap_model from tensorboardX import SummaryWriter from datasets import load_dataset from macro_gpt.models.llama.modeling_llama import LlamaForCausalLM as LLMForCausalLM from macro_gpt.models.llama.tokenization_llama import LlamaTokenizer as LLMTokenizer from macro_gpt.models.llama.modeling_llama import LlamaConfig as LLMConfig from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import PATH_MODEL_PRETRAIN, DATA_PATH, MODEL_SAVE_DIR, REPO_ID from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import MICRO_BATCH_SIZE, BATCH_SIZE, GRADIENT_ACCUMULATION_STEPS from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import LEARNING_RATE, EPOCHS, SAVE_STEPS, VAL_SET_SIZE, TARGET_MODULES from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import IS_PARALLELIZABLE, MODEL_PARALLEL, USE_CACHE from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import MAX_LENGTH_Q, MAX_LENGTH_A, MAX_LENGTH_QA from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import LORA_DROPOUT, LORA_ALPHA, LORA_R from macro_gpt.ft_gpt.config_macrogpt_1b3_float32 import PATH_MODEL_CONFIG, PATH_TOKENIZER_PRETRAIN
13,833
# ID_BOS = 64792 # ID_EOS = 64793 # ID_MASK = 64789 # ID_PAD = 2 ID_EOP = 2 ID_SOP = 1 ID_BOS = 1 ID_EOS = 2 ID_PAD = 0 IDS_ORG = [ID_PAD] # { "<|endoftext|>": 50256, # "### End": 50257, # "### Instruction:": 50258, # "### Response:\n": 50259 # } # model = GPT2LMHeadModel.from_pretrained(PATH_MODEL_PRETRAIN) llm_config = LLMConfig.from_json_file(PATH_MODEL_CONFIG) model = LLMForCausalLM(llm_config) model.init_weights() model.gradient_checkpointing_enable() model.enable_input_require_grads() model.is_parallelizable = IS_PARALLELIZABLE model.model_parallel = MODEL_PARALLEL model.config.use_cache = USE_CACHE # model.clip_grad_norm_ = 1.0 # model = model.half().cuda() ## norm and lm_head layers in fp32 # prepare_model_for_half_training(model, output_embedding_layer_name="lm_head", # use_gradient_checkpointing=True, layer_norm_names=["post_attention_layernorm", # "input_layernorm", # "norm", # ]) model = model.cuda() print_rank_0_named_parameters(model) tensorboardx_witer = SummaryWriter(logdir=MODEL_SAVE_DIR) # files = dfs_file(DATA_PATH) # files = [files for file in files if "data_merge.0" in file or "data_merge.1" in file] ### case where there is only a train split # data = load_dataset("json", data_files={"train": files}) data = load_dataset("json", data_files=DATA_PATH) # data = load_dataset("json", data_dir=DATA_PATH) # train_val = data["train"].train_test_split(test_size=min(VAL_SET_SIZE, # int(len(data["train"])/10000)), shuffle=True, seed=42) # VAL_SET_SIZE = max(min(VAL_SET_SIZE, int(len(data["train"])/10000)), 1) # generate_prompt(data["train"][0], is_logger=True) # train_val = data["train"].train_test_split(test_size=VAL_SET_SIZE, shuffle=True, seed=42) # train_data = train_val["train"].shuffle().map(generate_prompt) # val_data = train_val["test"].shuffle().map(generate_prompt) # generate_prompt(data["train"][0], is_logger=True) # train_val = data["train"].train_test_split(test_size=1024, shuffle=True, seed=42) # train_data = train_val["test"].shuffle().map(generate_prompt) # val_data = None generate_prompt(data["train"][0], is_logger=True) train_data = data["train"].shuffle().map(generate_prompt) val_data = None class CustomTrainer(transformers.Trainer): def compute_loss(self, model, inputs, return_outputs=False): inputs = {k: v.cuda() for k, v in inputs.items()} outputs = model(**inputs) # if inputs contain labels, loss will be calculated if local_rank_is_0: logs = {} tr_loss_scalar = self._nested_gather(outputs.loss.detach()).mean().item() logs["loss"] = round(tr_loss_scalar, 4) logs["lr"] = self.lr_scheduler.get_last_lr()[0] step = self.state.global_step for k, v in logs.items(): tensorboardx_witer.add_scalar(k, v, step) self.log(logs) if self.label_smoother is not None and "labels" in inputs: labels = inputs.pop("labels") else: labels = None # Save past state if it exists # TODO: this needs to be fixed and made cleaner later. if self.args.past_index >= 0: self._past = outputs[self.args.past_index] if labels is not None: if unwrap_model(model)._get_name() in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.values(): loss = self.label_smoother(outputs, labels, shift_labels=True) else: loss = self.label_smoother(outputs, labels) else: if isinstance(outputs, dict) and "loss" not in outputs: raise ValueError( "The model did not return a loss from the inputs, only the following keys: " f"{','.join(outputs.keys())}. For reference, the inputs it received are {','.join(inputs.keys())}." ) # We don't use .loss here since the model may return tuples instead of ModelOutput. 
loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0] # if llm_config.torch_dtype == "float16": # loss = loss.half() loss = loss.half() return (loss, outputs) if return_outputs else loss trainer = CustomTrainer( # data_collator=transformers.DataCollatorForSeq2Seq( # tokenizer, pad_to_multiple_of=8, # return_tensors="pt", padding=True # ), data_collator=data_collator, train_dataset=train_data, eval_dataset=val_data, model=model, args=transformers.TrainingArguments( gradient_accumulation_steps=GRADIENT_ACCUMULATION_STEPS,
# !/usr/bin/python # -*- coding: utf-8 -*- # @time : 2023/3/5 21:04 # @author : Mo # @function: macro-gpt path_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")) sys.path.append(path_root) os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:3072" os.environ["CUDA_VISIBLE_DEVICES"] = CUDA_VISIBLE_DEVICES os.environ["USE_TORCH"] = USE_TORCH os.environ["OMP_NUM_THREADS"] = CPU_NUMS # export OMP_NUM_THREADS=1 os.environ["OPENBLAS_NUM_THREADS"] = CPU_NUMS # export OPENBLAS_NUM_THREADS=1 os.environ["MKL_NUM_THREADS"] = CPU_NUMS # export MKL_NUM_THREADS=1 os.environ["VECLIB_MAXIMUM_THREADS"] = CPU_NUMS # export VECLIB_MAXIMUM_THREADS=1 os.environ["NUMEXPR_NUM_THREADS"] = CPU_NUMS # export NUMEXPR_NUM_THREADS=1 def save_model_state(model, config=None, model_save_dir="./", model_name="adapter_model.bin"): """ Save only the model parameters that carry gradients (recommended) """ if not os.path.exists(model_save_dir): os.makedirs(model_save_dir) # save config if config: config.save_pretrained(model_save_dir) # config.to_dict() # save model path_model = os.path.join(model_save_dir, model_name) # grad_params_dict = {k: v.to("cpu") for k, v in model.named_parameters() # if v.requires_grad == True} grad_params_dict = {k: v.to("cpu") for k, v in model.named_parameters()} torch.save(grad_params_dict, path_model) print_rank_0("******model_save_path is {}******".format(path_model)) def print_rank_0_named_parameters(model, use_print_rank_0_data=False): """ Print the model's trainable-parameter and dtype information """ trainable_params = 0 all_param = 0 for name, param in model.named_parameters(): if use_print_rank_0_data: print_rank_0((name, param.data.dtype, param.requires_grad, param.data)) else: print_rank_0((name, param.data.dtype, param.requires_grad)) num_params = param.numel() # if using DS Zero 3 and the weights are initialized empty if num_params == 0 and hasattr(param, "ds_numel"): num_params = param.ds_numel all_param += num_params if param.requires_grad: trainable_params += num_params print_rank_0(f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}") def prepare_model_for_half_training(model, output_embedding_layer_name="lm_head", use_gradient_checkpointing=True, layer_norm_names=["layer_norm"]): r""" This method wraps the entire protocol for preparing a model before running a training. 
This includes: 1- Cast the layernorm in fp32 2- making output embedding layer require grads 3- Add the upcasting of the lm head to fp32 Args: model, (`transformers.PreTrainedModel`): The loaded model from `transformers` """ # Do not call model.half(): that would truncate precision before training; the data must already be half from the start for name, param in model.named_parameters(): # freeze base model's layers # cast layer norm in fp32 for stability for 8bit models if param.ndim == 1 and any(layer_norm_name in name for layer_norm_name in layer_norm_names): param.data = param.data.to(torch.float32) elif output_embedding_layer_name in name: # lm_head (the last layer) also needs to be float32 param.data = param.data.to(torch.float32) else: param.data = param.data.to(torch.half) if use_gradient_checkpointing: # For backward compatibility if hasattr(model, "enable_input_require_grads"): model.enable_input_require_grads() else: def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) # enable gradient checkpointing for memory efficiency model.gradient_checkpointing_enable() return model def generate_prompt(data_point, is_logger=False): # sorry about the formatting disaster gotta move fast # text_1 = f"指令:\n{data_point.get('instruction', '')}\n问:\n{data_point.get('input', '')}\n答:\n" \ # if data_point.get('input', '') else f"指令:\n{data_point.get('instruction', '')}\n答:\n" # text_2 = f"{data_point.get('output', '')}" text_a = data_point.get("a", "") prompt_str_1 = text_a # end with gMASK, <sop> x = tokenizer.encode(prompt_str_1) if len(x) > MAX_LENGTH_QA - 2: x = x[:MAX_LENGTH_QA - 2] if not x: x = [ID_PAD, ID_EOS] if x and x[-1] != ID_EOS: x += [ID_EOS] out = {"input_ids": x, "labels": []} if is_logger: print_rank_0(prompt_str_1) print_rank_0(out) return out def data_collator(batch): def get_position_ids(seq, bos_token_id): seq_length = len(seq) position_ids = torch.arange(seq_length, dtype=torch.long).unsqueeze(0) return position_ids def get_masks(seq, special_ids=IDS_ORG): """ padding-mask """ # mask until ID_SOP attention_mask = torch.ones((1, len(seq), len(seq))) attention_mask.tril_() # ### if padding-right, mask those positions too # for idx, s in enumerate(seq): # if s in special_ids: # attention_mask[..., idx] = 1 attention_mask = (attention_mask < 0.5).bool() return attention_mask len_max_batch = [len(batch[i].get("input_ids")) + len(batch[i].get("labels")) + 1 for i in range(len(batch))] len_max_batch = min(MAX_LENGTH_QA, max(len_max_batch)) batch_attention_mask = [] batch_position_ids = [] batch_input_ids = [] batch_labels = [] for ba in batch: x, y = ba.get("input_ids"), ba.get("labels") len_padding = len_max_batch - len(x) - len(y) if tokenizer.padding_side and tokenizer.padding_side == "left": labels = [-100] * len_padding + x + y input_ids = [ID_PAD] * (len_padding) + x + y else: labels = x + y + [-100] * len_padding input_ids = x + y + [ID_PAD] * (len_padding) tensor_position_ids = get_position_ids(input_ids, bos_token_id=ID_SOP) tensor_attention_mask = get_masks(input_ids, special_ids=IDS_ORG) tensor_input_ids = torch.tensor(input_ids, dtype=torch.long) tensor_labels = torch.tensor(labels, dtype=torch.long) batch_attention_mask.append(tensor_attention_mask) batch_position_ids.append(tensor_position_ids) batch_input_ids.append(tensor_input_ids) batch_labels.append(tensor_labels) # print_rank_0(batch_attention_mask) batch_attention_mask = torch.stack(batch_attention_mask) batch_position_ids = torch.stack(batch_position_ids) batch_input_ids = torch.stack(batch_input_ids) batch_labels = 
torch.stack(batch_labels) input_dict = { # "full_attention_mask": copy.deepcopy(batch_attention_mask), # "attention_mask": batch_attention_mask, # "position_ids": batch_position_ids, "input_ids": batch_input_ids, "labels": batch_labels, } # print_rank_0(input_dict) return input_dict def dfs_file(path_dir): """ Recursively collect all files under a directory (all levels, including subdirectories) Args: path_dir[String]:, path of dir, eg. "/home/data" Returns: data[List]: data of input, eg. ["2020_01_08.txt"] """ path_files = [] for root, dirs, files in os.walk(path_dir): # root dir, subdirectories, files respectively for file in files: # iterate over files file_path = os.path.join(root, file) # absolute file path path_files.append(file_path) # append the file path to the list files = list(set(path_files)) files.sort() # the same list return files def print_rank_0(*args): """ Print only on GPU rank 0 """ # if torch.distributed.get_rank() == 0: # rank 0 is the usual choice; any rank could be used for saving. # print(*args) print(*args) def local_rank_is_0(): """ Check whether this is the rank-0 machine """ # flag = False # if torch.distributed.get_rank() == 0: # flag = True # return flag return True # import torch.distributed as dist # dist.init_process_group(backend='nccl') # torch.distributed.init_process_group() tokenizer = LLMTokenizer.from_pretrained(PATH_TOKENIZER_PRETRAIN) # tokenizer.pad_token = tokenizer.eos_token # tokenizer.padding_side = "left" # Allow batched inference tokenizer.padding_side = "right" # Allow batched inference # ID_gMASK = 64790 # ID_BOS = 64792 # ID_EOS = 64793 # ID_MASK = 64789 # ID_PAD = 2 ID_EOP = 2 ID_SOP = 1 ID_BOS = 1 ID_EOS = 2 ID_PAD = 0 IDS_ORG = [ID_PAD] # { "<|endoftext|>": 50256, # "### End": 50257, # "### Instruction:": 50258, # "### Response:\n": 50259 # } # model = GPT2LMHeadModel.from_pretrained(PATH_MODEL_PRETRAIN) llm_config = LLMConfig.from_json_file(PATH_MODEL_CONFIG) model = LLMForCausalLM(llm_config) model.init_weights() model.gradient_checkpointing_enable() model.enable_input_require_grads() model.is_parallelizable = IS_PARALLELIZABLE model.model_parallel = MODEL_PARALLEL model.config.use_cache = USE_CACHE # model.clip_grad_norm_ = 1.0 # model = model.half().cuda() ## norm and lm_head layers stay fp32 # prepare_model_for_half_training(model, output_embedding_layer_name="lm_head", # use_gradient_checkpointing=True, layer_norm_names=["post_attention_layernorm", # "input_layernorm", # "norm", # ]) model = model.cuda() print_rank_0_named_parameters(model) tensorboardx_witer = SummaryWriter(logdir=MODEL_SAVE_DIR) # files = dfs_file(DATA_PATH) # files = [files for file in files if "data_merge.0" in file or "data_merge.1" in file] ### case with only a single train split # data = load_dataset("json", data_files={"train": files}) data = load_dataset("json", data_files=DATA_PATH) # data = load_dataset("json", data_dir=DATA_PATH) # train_val = data["train"].train_test_split(test_size=min(VAL_SET_SIZE, # int(len(data["train"])/10000)), shuffle=True, seed=42) # VAL_SET_SIZE = max(min(VAL_SET_SIZE, int(len(data["train"])/10000)), 1) # generate_prompt(data["train"][0], is_logger=True) # train_val = data["train"].train_test_split(test_size=VAL_SET_SIZE, shuffle=True, seed=42) # train_data = train_val["train"].shuffle().map(generate_prompt) # val_data = train_val["test"].shuffle().map(generate_prompt) # generate_prompt(data["train"][0], is_logger=True) # train_val = data["train"].train_test_split(test_size=1024, shuffle=True, seed=42) # train_data = train_val["test"].shuffle().map(generate_prompt) # val_data = None generate_prompt(data["train"][0], is_logger=True) train_data = data["train"].shuffle().map(generate_prompt) val_data = None class CustomTrainer(transformers.Trainer): def compute_loss(self, model, 
inputs, return_outputs=False): inputs = {k: v.cuda() for k, v in inputs.items()} outputs = model(**inputs) # if contain labels, will calculate loss if local_rank_is_0(): logs = {} tr_loss_scalar = self._nested_gather(outputs.loss.detach()).mean().item() logs["loss"] = round(tr_loss_scalar, 4) logs["lr"] = self.lr_scheduler.get_last_lr()[0] step = self.state.global_step for k, v in logs.items(): tensorboardx_witer.add_scalar(k, v, step) self.log(logs) if self.label_smoother is not None and "labels" in inputs: labels = inputs.pop("labels") else: labels = None # Save past state if it exists # TODO: this needs to be fixed and made cleaner later. if self.args.past_index >= 0: self._past = outputs[self.args.past_index] if labels is not None: if unwrap_model(model)._get_name() in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.values(): loss = self.label_smoother(outputs, labels, shift_labels=True) else: loss = self.label_smoother(outputs, labels) else: if isinstance(outputs, dict) and "loss" not in outputs: raise ValueError( "The model did not return a loss from the inputs, only the following keys: " f"{','.join(outputs.keys())}. For reference, the inputs it received are {','.join(inputs.keys())}." ) # We don't use .loss here since the model may return tuples instead of ModelOutput. loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0] # if llm_config.torch_dtype == "float16": # loss = loss.half() loss = loss.half() return (loss, outputs) if return_outputs else loss trainer = CustomTrainer( # data_collator=transformers.DataCollatorForSeq2Seq( # tokenizer, pad_to_multiple_of=8, # return_tensors="pt", padding=True # ), data_collator=data_collator, train_dataset=train_data, eval_dataset=val_data, model=model, args=transformers.TrainingArguments( gradient_accumulation_steps=GRADIENT_ACCUMULATION_STEPS,
per_device_train_batch_size=MICRO_BATCH_SIZE,
10
2023-11-30 12:39:19+00:00
16k
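The `get_masks` helper in the record above builds its causal attention mask by lower-triangularizing a ones tensor in place and then inverting the comparison into a boolean tensor, so `True` flags positions a token must not attend to. A minimal standalone sketch of that pattern (PyTorch; the sequence length is illustrative):

import torch

def build_causal_mask(seq_len: int) -> torch.Tensor:
    # All-ones, then keep only the lower triangle: each position may
    # attend to itself and to earlier positions.
    mask = torch.ones((1, seq_len, seq_len))
    mask.tril_()
    # Invert so that True marks the disallowed (future) positions.
    return (mask < 0.5).bool()

print(build_causal_mask(4)[0])
# tensor([[False,  True,  True,  True],
#         [False, False,  True,  True],
#         [False, False, False,  True],
#         [False, False, False, False]])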
owkin/fedeca
fedeca/scripts/dp_logreg.py
[ { "identifier": "TorchDPFedAvgAlgo", "path": "fedeca/algorithms/torch_dp_fed_avg_algo.py", "snippet": "class TorchDPFedAvgAlgo(TorchFedAvgAlgo):\n \"\"\"To be inherited.\n\n Wraps the necessary operation so a torch model can be trained in the Federated\n Averaging strategy using DP.\n \"\"\"...
import sys import numpy as np import pandas as pd import torch import torch.nn as nn from itertools import product from sklearn.metrics import accuracy_score from substrafl.algorithms.pytorch import TorchNewtonRaphsonAlgo from substrafl.model_loading import download_algo_state from substrafl.strategies import FedAvg, NewtonRaphson from torch.optim import SGD from fedeca.algorithms.torch_dp_fed_avg_algo import TorchDPFedAvgAlgo from fedeca.fedeca_core import LogisticRegressionTorch from fedeca.utils import ( Experiment, make_accuracy_function, make_substrafl_torch_dataset_class, ) from fedeca.utils.survival_utils import CoxData, make_categorical
11,812
"""Runs the propensity model training part with DP.""" if __name__ == "__main__": epsilons = [0.1, 1.0, 5.0, 10.0][::-1] deltas = [10 ** (-i) for i in range(1, 3)] START_SEED = 42 NDIM = 10 NUM_ROUNDS = 10 NUM_UPDATES = 100 N_REPETITIONS = 5 BACKEND_TYPE = "subprocess" BATCH_SIZE = 32 na_proportion = 0.0 seeds = np.arange(START_SEED, START_SEED + N_REPETITIONS).tolist() rng = np.random.default_rng(seeds[0]) # Generating data with strong linear relationship simu_coxreg = CoxData( n_samples=300, ndim=NDIM, prop_treated=0.5, propensity="linear", dtype="float32", overlap=100.0, seed=rng, random_censoring=True, censoring_factor=0.3, standardize_features=False, ) X, T, C, treated, _ = simu_coxreg.generate_data() # Will make first columns to be categorical Xcat, Xcont = make_categorical(X, up_to=0) # Build the final dataframe using appropriate column names and adding missing values cols_dict = {} X = np.concatenate((Xcat, Xcont), axis=1) for i in range(Xcat.shape[1] + Xcont.shape[1]): currentX = X[:, i].astype("float32") mask_na = rng.uniform(0, 1, X.shape[0]) > (1.0 - na_proportion) currentX[mask_na] = np.nan if i < Xcat.shape[1]: colname = "cat_col" else: colname = "col" i -= Xcat.shape[1] cols_dict[f"{colname}_{i}"] = currentX # The absolute value is superfluous but just to be sure cols_dict["T"] = np.abs(T) cols_dict["E"] = (1.0 - C).astype("uint8") cols_dict["treated"] = treated df = pd.DataFrame(cols_dict) # Final cast of categorical columns that was impossible due to nan in numpy for i in range(Xcat.shape[1]): df[f"cat_col_{i}"] = df[f"cat_col_{i}"].astype("Int64") results_all_reps = [] edelta_list = list(product(epsilons, deltas)) accuracy_metrics_dict = {"accuracy": make_accuracy_function("treated")} # We set model and dataloaders to be the same for each rep
"""Runs the propensity model training part with DP.""" if __name__ == "__main__": epsilons = [0.1, 1.0, 5.0, 10.0][::-1] deltas = [10 ** (-i) for i in range(1, 3)] START_SEED = 42 NDIM = 10 NUM_ROUNDS = 10 NUM_UPDATES = 100 N_REPETITIONS = 5 BACKEND_TYPE = "subprocess" BATCH_SIZE = 32 na_proportion = 0.0 seeds = np.arange(START_SEED, START_SEED + N_REPETITIONS).tolist() rng = np.random.default_rng(seeds[0]) # Generating data with strong linear relationship simu_coxreg = CoxData( n_samples=300, ndim=NDIM, prop_treated=0.5, propensity="linear", dtype="float32", overlap=100.0, seed=rng, random_censoring=True, censoring_factor=0.3, standardize_features=False, ) X, T, C, treated, _ = simu_coxreg.generate_data() # Will make first columns to be categorical Xcat, Xcont = make_categorical(X, up_to=0) # Build the final dataframe using appropriate column names and adding missing values cols_dict = {} X = np.concatenate((Xcat, Xcont), axis=1) for i in range(Xcat.shape[1] + Xcont.shape[1]): currentX = X[:, i].astype("float32") mask_na = rng.uniform(0, 1, X.shape[0]) > (1.0 - na_proportion) currentX[mask_na] = np.nan if i < Xcat.shape[1]: colname = "cat_col" else: colname = "col" i -= Xcat.shape[1] cols_dict[f"{colname}_{i}"] = currentX # The absolute value is superfluous but just to be sure cols_dict["T"] = np.abs(T) cols_dict["E"] = (1.0 - C).astype("uint8") cols_dict["treated"] = treated df = pd.DataFrame(cols_dict) # Final cast of categorical columns that was impossible due to nan in numpy for i in range(Xcat.shape[1]): df[f"cat_col_{i}"] = df[f"cat_col_{i}"].astype("Int64") results_all_reps = [] edelta_list = list(product(epsilons, deltas)) accuracy_metrics_dict = {"accuracy": make_accuracy_function("treated")} # We set model and dataloaders to be the same for each rep
logreg_model = LogisticRegressionTorch(NDIM, torch.float32)
1
2023-11-27 18:01:37+00:00
16k
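The fedeca script above evaluates every privacy budget in a grid of epsilons and deltas, repeated over a fixed set of seeds. The loop structure in isolation (the `run_one` body is a placeholder, not FedECA's actual training call):

from itertools import product

import numpy as np

epsilons = [0.1, 1.0, 5.0, 10.0][::-1]
deltas = [10 ** (-i) for i in range(1, 3)]  # [0.1, 0.01]
seeds = np.arange(42, 42 + 5).tolist()

def run_one(epsilon, delta, seed):
    # Placeholder: a real run would train the propensity model with
    # DP-SGD under (epsilon, delta) and return its accuracy.
    return {"epsilon": epsilon, "delta": delta, "seed": seed}

results = [run_one(e, d, s)
           for (e, d), s in product(product(epsilons, deltas), seeds)]
print(len(results))  # 4 epsilons x 2 deltas x 5 seeds = 40 runs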
aliyun/pai-python-sdk
pai/api/training_job.py
[ { "identifier": "PaginatedResult", "path": "pai/api/base.py", "snippet": "class PaginatedResult(object):\n \"\"\"A class represent response of a pagination call to PAI service.\"\"\"\n\n items: List[Union[Dict[str, Any], str]] = None\n total_count: int = None\n\n def __init__(self, items: Li...
from typing import Any, Dict, List, Optional from ..api.base import PaginatedResult, ServiceName, WorkspaceScopedResourceAPI from ..libs.alibabacloud_paistudio20220112.models import ( AlgorithmSpec, CreateTrainingJobRequest, CreateTrainingJobRequestComputeResource, CreateTrainingJobRequestHyperParameters, CreateTrainingJobRequestInputChannels, CreateTrainingJobRequestLabels, CreateTrainingJobRequestOutputChannels, CreateTrainingJobRequestScheduler, CreateTrainingJobRequestUserVpc, CreateTrainingJobResponseBody, GetTrainingJobRequest, GetTrainingJobResponseBody, ListTrainingJobLogsRequest, ListTrainingJobLogsResponseBody, ListTrainingJobsRequest, )
12,362
# https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class TrainingJobAPI(WorkspaceScopedResourceAPI): BACKEND_SERVICE_NAME = ServiceName.PAI_STUDIO _list_method = "list_training_jobs_with_options" _create_method = "create_training_job_with_options" _get_method = "get_training_job_with_options" _list_logs_method = "list_training_job_logs_with_options" # _list_method = "list_training_jobs_with_options" def list( self, page_size: int = 20, page_number: int = 1, order: str = None, sort_by: str = None, status: str = None, training_job_name: str = None, ) -> PaginatedResult: request = ListTrainingJobsRequest( page_size=page_size, page_number=page_number, status=status, training_job_name=training_job_name, order=order, sort_by=sort_by, ) res = self._do_request( method_=self._list_method, tmp_req=request, ) return self.make_paginated_result(res) def get_api_object_by_resource_id(self, resource_id) -> Dict[str, Any]: res: GetTrainingJobResponseBody = self._do_request( method_=self._get_method, training_job_id=resource_id, request=GetTrainingJobRequest(), ) return res.to_map() def get(self, training_job_id) -> Dict[str, Any]: return self.get_api_object_by_resource_id(training_job_id) def create( self, instance_type, instance_count, job_name, hyperparameters: Optional[Dict[str, Any]] = None, input_channels: Optional[List[Dict[str, Any]]] = None, output_channels: Optional[List[Dict[str, Any]]] = None, labels: Optional[Dict[str, str]] = None, max_running_in_seconds: Optional[int] = None, description: Optional[str] = None, algorithm_name: Optional[str] = None, algorithm_version: Optional[str] = None, algorithm_provider: Optional[str] = None, algorithm_spec: Optional[Dict[str, Any]] = None, user_vpc_config: Optional[Dict[str, Any]] = None, ) -> str: """Create a TrainingJob.""" if algorithm_spec and ( algorithm_name or algorithm_version or algorithm_provider ): raise ValueError( "Please provide algorithm_spec or a tuple of (algorithm_name, " "algorithm_version or algorithm_provider), but not both." ) if algorithm_spec: algo_spec = AlgorithmSpec().from_map(algorithm_spec) else: algo_spec = None input_channels = [ CreateTrainingJobRequestInputChannels().from_map(ch) for ch in input_channels ] output_channels = [ CreateTrainingJobRequestOutputChannels().from_map(ch) for ch in output_channels ] compute_resource = CreateTrainingJobRequestComputeResource( ecs_count=instance_count, ecs_spec=instance_type, ) hyper_parameters = [ CreateTrainingJobRequestHyperParameters( name=name, value=str(value), ) for name, value in hyperparameters.items() ] labels = ( [ CreateTrainingJobRequestLabels(key=key, value=value) for key, value in labels.items() ] if labels else None ) scheduler = CreateTrainingJobRequestScheduler( max_running_time_in_seconds=max_running_in_seconds )
# Copyright 2023 Alibaba, Inc. or its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class TrainingJobAPI(WorkspaceScopedResourceAPI): BACKEND_SERVICE_NAME = ServiceName.PAI_STUDIO _list_method = "list_training_jobs_with_options" _create_method = "create_training_job_with_options" _get_method = "get_training_job_with_options" _list_logs_method = "list_training_job_logs_with_options" # _list_method = "list_training_jobs_with_options" def list( self, page_size: int = 20, page_number: int = 1, order: str = None, sort_by: str = None, status: str = None, training_job_name: str = None, ) -> PaginatedResult: request = ListTrainingJobsRequest( page_size=page_size, page_number=page_number, status=status, training_job_name=training_job_name, order=order, sort_by=sort_by, ) res = self._do_request( method_=self._list_method, tmp_req=request, ) return self.make_paginated_result(res) def get_api_object_by_resource_id(self, resource_id) -> Dict[str, Any]: res: GetTrainingJobResponseBody = self._do_request( method_=self._get_method, training_job_id=resource_id, request=GetTrainingJobRequest(), ) return res.to_map() def get(self, training_job_id) -> Dict[str, Any]: return self.get_api_object_by_resource_id(training_job_id) def create( self, instance_type, instance_count, job_name, hyperparameters: Optional[Dict[str, Any]] = None, input_channels: Optional[List[Dict[str, Any]]] = None, output_channels: Optional[List[Dict[str, Any]]] = None, labels: Optional[Dict[str, str]] = None, max_running_in_seconds: Optional[int] = None, description: Optional[str] = None, algorithm_name: Optional[str] = None, algorithm_version: Optional[str] = None, algorithm_provider: Optional[str] = None, algorithm_spec: Optional[Dict[str, Any]] = None, user_vpc_config: Optional[Dict[str, Any]] = None, ) -> str: """Create a TrainingJob.""" if algorithm_spec and ( algorithm_name or algorithm_version or algorithm_provider ): raise ValueError( "Please provide algorithm_spec or a tuple of (algorithm_name, " "algorithm_version or algorithm_provider), but not both." ) if algorithm_spec: algo_spec = AlgorithmSpec().from_map(algorithm_spec) else: algo_spec = None input_channels = [ CreateTrainingJobRequestInputChannels().from_map(ch) for ch in input_channels ] output_channels = [ CreateTrainingJobRequestOutputChannels().from_map(ch) for ch in output_channels ] compute_resource = CreateTrainingJobRequestComputeResource( ecs_count=instance_count, ecs_spec=instance_type, ) hyper_parameters = [ CreateTrainingJobRequestHyperParameters( name=name, value=str(value), ) for name, value in hyperparameters.items() ] labels = ( [ CreateTrainingJobRequestLabels(key=key, value=value) for key, value in labels.items() ] if labels else None ) scheduler = CreateTrainingJobRequestScheduler( max_running_time_in_seconds=max_running_in_seconds )
request = CreateTrainingJobRequest(
4
2023-12-01 01:40:12+00:00
16k
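A hypothetical call against the `TrainingJobAPI.create` signature shown above; the instance type, algorithm triple, and channel dictionaries are illustrative values rather than documented defaults, and `api` stands for an already-constructed client:

# Sketch only: assumes `api` is an initialized TrainingJobAPI instance.
training_job_id = api.create(
    instance_type="ecs.gn6i-c4g1.xlarge",  # illustrative ECS spec
    instance_count=1,
    job_name="example-training-job",
    algorithm_name="xgboost",              # illustrative algorithm triple
    algorithm_version="v1",
    algorithm_provider="pai",
    hyperparameters={"max_depth": 6, "eta": 0.3},
    input_channels=[{"Name": "train", "InputUri": "oss://my-bucket/data/train/"}],
    output_channels=[{"Name": "model", "OutputUri": "oss://my-bucket/model/"}],
    max_running_in_seconds=3600,
)

Note that `create` raises a ValueError when `algorithm_spec` is passed together with the name/version/provider triple, so a caller supplies one or the other.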
JunMa11/UHNSeg-Quiz
nnunetv2/inference/predict_from_raw_data.py
[ { "identifier": "default_num_processes", "path": "nnunetv2/configuration.py", "snippet": "ANISO_THRESHOLD = 3 # determines when a sample is considered anisotropic (3 means that the spacing in the low" }, { "identifier": "PreprocessAdapterFromNpy", "path": "nnunetv2/inference/data_iterators....
import inspect import multiprocessing import os import traceback import numpy as np import torch import nnunetv2 import argparse import multiprocessing import argparse import multiprocessing from copy import deepcopy from time import sleep from typing import Tuple, Union, List, Optional from acvl_utils.cropping_and_padding.padding import pad_nd_image from batchgenerators.dataloading.multi_threaded_augmenter import MultiThreadedAugmenter from batchgenerators.utilities.file_and_folder_operations import load_json, join, isfile, maybe_mkdir_p, isdir, subdirs, \ save_json from torch import nn from torch._dynamo import OptimizedModule from torch.nn.parallel import DistributedDataParallel from tqdm import tqdm from nnunetv2.configuration import default_num_processes from nnunetv2.inference.data_iterators import PreprocessAdapterFromNpy, preprocessing_iterator_fromfiles, \ preprocessing_iterator_fromnpy from nnunetv2.inference.export_prediction import export_prediction_from_logits, \ convert_predicted_logits_to_segmentation_with_correct_shape from nnunetv2.inference.sliding_window_prediction import compute_gaussian, \ compute_steps_for_sliding_window from nnunetv2.utilities.file_path_utilities import get_output_folder, check_workers_alive_and_busy from nnunetv2.utilities.find_class_by_name import recursive_find_python_class from nnunetv2.utilities.helpers import empty_cache, dummy_context from nnunetv2.utilities.json_export import recursive_fix_for_json_export from nnunetv2.utilities.label_handling.label_handling import determine_num_input_channels from nnunetv2.utilities.plans_handling.plans_handler import PlansManager, ConfigurationManager from nnunetv2.utilities.utils import create_lists_from_splitted_dataset_folder from nnunetv2.paths import nnUNet_results, nnUNet_raw from nnunetv2.imageio.simpleitk_reader_writer import SimpleITKIO
10,968
if save_probabilities: tmp2 = [isfile(i + '.npz') for i in output_filename_truncated] tmp = [i and j for i, j in zip(tmp, tmp2)] not_existing_indices = [i for i, j in enumerate(tmp) if not j] output_filename_truncated = [output_filename_truncated[i] for i in not_existing_indices] list_of_lists_or_source_folder = [list_of_lists_or_source_folder[i] for i in not_existing_indices] seg_from_prev_stage_files = [seg_from_prev_stage_files[i] for i in not_existing_indices] print(f'overwrite was set to {overwrite}, so I am only working on cases that haven\'t been predicted yet. ' f'That\'s {len(not_existing_indices)} cases.') return list_of_lists_or_source_folder, output_filename_truncated, seg_from_prev_stage_files def predict_from_files(self, list_of_lists_or_source_folder: Union[str, List[List[str]]], output_folder_or_list_of_truncated_output_files: Union[str, None, List[str]], save_probabilities: bool = False, overwrite: bool = True, num_processes_preprocessing: int = default_num_processes, num_processes_segmentation_export: int = default_num_processes, folder_with_segs_from_prev_stage: str = None, num_parts: int = 1, part_id: int = 0): """ This is nnU-Net's default function for making predictions. It works best for batch predictions (predicting many images at once). """ if isinstance(output_folder_or_list_of_truncated_output_files, str): output_folder = output_folder_or_list_of_truncated_output_files elif isinstance(output_folder_or_list_of_truncated_output_files, list): output_folder = os.path.dirname(output_folder_or_list_of_truncated_output_files[0]) else: output_folder = None ######################## # let's store the input arguments so that its clear what was used to generate the prediction if output_folder is not None: my_init_kwargs = {} for k in inspect.signature(self.predict_from_files).parameters.keys(): my_init_kwargs[k] = locals()[k] my_init_kwargs = deepcopy( my_init_kwargs) # let's not unintentionally change anything in-place. Take this as a recursive_fix_for_json_export(my_init_kwargs) maybe_mkdir_p(output_folder) save_json(my_init_kwargs, join(output_folder, 'predict_from_raw_data_args.json')) # we need these two if we want to do things with the predictions like for example apply postprocessing save_json(self.dataset_json, join(output_folder, 'dataset.json'), sort_keys=False) save_json(self.plans_manager.plans, join(output_folder, 'plans.json'), sort_keys=False) ####################### # check if we need a prediction from the previous stage if self.configuration_manager.previous_stage_name is not None: assert folder_with_segs_from_prev_stage is not None, \ f'The requested configuration is a cascaded network. It requires the segmentations of the previous ' \ f'stage ({self.configuration_manager.previous_stage_name}) as input. 
Please provide the folder where' \ f' they are located via folder_with_segs_from_prev_stage' # sort out input and output filenames list_of_lists_or_source_folder, output_filename_truncated, seg_from_prev_stage_files = \ self._manage_input_and_output_lists(list_of_lists_or_source_folder, output_folder_or_list_of_truncated_output_files, folder_with_segs_from_prev_stage, overwrite, part_id, num_parts, save_probabilities) if len(list_of_lists_or_source_folder) == 0: return data_iterator = self._internal_get_data_iterator_from_lists_of_filenames(list_of_lists_or_source_folder, seg_from_prev_stage_files, output_filename_truncated, num_processes_preprocessing) return self.predict_from_data_iterator(data_iterator, save_probabilities, num_processes_segmentation_export) def _internal_get_data_iterator_from_lists_of_filenames(self, input_list_of_lists: List[List[str]], seg_from_prev_stage_files: Union[List[str], None], output_filenames_truncated: Union[List[str], None], num_processes: int): return preprocessing_iterator_fromfiles(input_list_of_lists, seg_from_prev_stage_files, output_filenames_truncated, self.plans_manager, self.dataset_json, self.configuration_manager, num_processes, self.device.type == 'cuda', self.verbose_preprocessing) # preprocessor = self.configuration_manager.preprocessor_class(verbose=self.verbose_preprocessing) # # hijack batchgenerators, yo # # we use the multiprocessing of the batchgenerators dataloader to handle all the background worker stuff. This # # way we don't have to reinvent the wheel here. # num_processes = max(1, min(num_processes, len(input_list_of_lists))) # ppa = PreprocessAdapter(input_list_of_lists, seg_from_prev_stage_files, preprocessor, # output_filenames_truncated, self.plans_manager, self.dataset_json, # self.configuration_manager, num_processes) # if num_processes == 0: # mta = SingleThreadedAugmenter(ppa, None) # else: # mta = MultiThreadedAugmenter(ppa, None, num_processes, 1, None, pin_memory=pin_memory) # return mta def get_data_iterator_from_raw_npy_data(self, image_or_list_of_images: Union[np.ndarray, List[np.ndarray]], segs_from_prev_stage_or_list_of_segs_from_prev_stage: Union[None, np.ndarray, List[ np.ndarray]], properties_or_list_of_properties: Union[dict, List[dict]], truncated_ofname: Union[str, List[str], None], num_processes: int = 3): list_of_images = [image_or_list_of_images] if not isinstance(image_or_list_of_images, list) else \ image_or_list_of_images if isinstance(segs_from_prev_stage_or_list_of_segs_from_prev_stage, np.ndarray): segs_from_prev_stage_or_list_of_segs_from_prev_stage = [ segs_from_prev_stage_or_list_of_segs_from_prev_stage] if isinstance(truncated_ofname, str): truncated_ofname = [truncated_ofname] if isinstance(properties_or_list_of_properties, dict): properties_or_list_of_properties = [properties_or_list_of_properties] num_processes = min(num_processes, len(list_of_images))
class nnUNetPredictor(object): def __init__(self, tile_step_size: float = 0.5, use_gaussian: bool = True, use_mirroring: bool = True, perform_everything_on_gpu: bool = True, device: torch.device = torch.device('cuda'), verbose: bool = False, verbose_preprocessing: bool = False, allow_tqdm: bool = True): self.verbose = verbose self.verbose_preprocessing = verbose_preprocessing self.allow_tqdm = allow_tqdm self.plans_manager, self.configuration_manager, self.list_of_parameters, self.network, self.dataset_json, \ self.trainer_name, self.allowed_mirroring_axes, self.label_manager = None, None, None, None, None, None, None, None self.tile_step_size = tile_step_size self.use_gaussian = use_gaussian self.use_mirroring = use_mirroring if device.type == 'cuda': # device = torch.device(type='cuda', index=0) # set the desired GPU with CUDA_VISIBLE_DEVICES! # why would I ever want to do that. Stupid dobby. This kills DDP inference... pass if device.type != 'cuda': print(f'perform_everything_on_gpu=True is only supported for cuda devices! Setting this to False') perform_everything_on_gpu = False self.device = device self.perform_everything_on_gpu = perform_everything_on_gpu def initialize_from_trained_model_folder(self, model_training_output_dir: str, use_folds: Union[Tuple[Union[int, str]], None], checkpoint_name: str = 'checkpoint_final.pth'): """ This is used when making predictions with a trained model """ if use_folds is None: use_folds = nnUNetPredictor.auto_detect_available_folds(model_training_output_dir, checkpoint_name) dataset_json = load_json(join(model_training_output_dir, 'dataset.json')) plans = load_json(join(model_training_output_dir, 'plans.json')) plans_manager = PlansManager(plans) if isinstance(use_folds, str): use_folds = [use_folds] parameters = [] for i, f in enumerate(use_folds): f = int(f) if f != 'all' else f checkpoint = torch.load(join(model_training_output_dir, f'fold_{f}', checkpoint_name), map_location=torch.device('cpu')) if i == 0: trainer_name = checkpoint['trainer_name'] configuration_name = checkpoint['init_args']['configuration'] inference_allowed_mirroring_axes = checkpoint['inference_allowed_mirroring_axes'] if \ 'inference_allowed_mirroring_axes' in checkpoint.keys() else None parameters.append(checkpoint['network_weights']) configuration_manager = plans_manager.get_configuration(configuration_name) # restore network num_input_channels = determine_num_input_channels(plans_manager, configuration_manager, dataset_json) trainer_class = recursive_find_python_class(join(nnunetv2.__path__[0], "training", "nnUNetTrainer"), trainer_name, 'nnunetv2.training.nnUNetTrainer') network = trainer_class.build_network_architecture(plans_manager, dataset_json, configuration_manager, num_input_channels, enable_deep_supervision=False) self.plans_manager = plans_manager self.configuration_manager = configuration_manager self.list_of_parameters = parameters self.network = network self.dataset_json = dataset_json self.trainer_name = trainer_name self.allowed_mirroring_axes = inference_allowed_mirroring_axes self.label_manager = plans_manager.get_label_manager(dataset_json) if ('nnUNet_compile' in os.environ.keys()) and (os.environ['nnUNet_compile'].lower() in ('true', '1', 't')) \ and not isinstance(self.network, OptimizedModule): print('compiling network') self.network = torch.compile(self.network) def manual_initialization(self, network: nn.Module, plans_manager: PlansManager, configuration_manager: ConfigurationManager, parameters: Optional[List[dict]], dataset_json: dict, 
trainer_name: str, inference_allowed_mirroring_axes: Optional[Tuple[int, ...]]): """ This is used by the nnUNetTrainer to initialize nnUNetPredictor for the final validation """ self.plans_manager = plans_manager self.configuration_manager = configuration_manager self.list_of_parameters = parameters self.network = network self.dataset_json = dataset_json self.trainer_name = trainer_name self.allowed_mirroring_axes = inference_allowed_mirroring_axes self.label_manager = plans_manager.get_label_manager(dataset_json) allow_compile = True allow_compile = allow_compile and ('nnUNet_compile' in os.environ.keys()) and (os.environ['nnUNet_compile'].lower() in ('true', '1', 't')) allow_compile = allow_compile and not isinstance(self.network, OptimizedModule) if isinstance(self.network, DistributedDataParallel): allow_compile = allow_compile and isinstance(self.network.module, OptimizedModule) if allow_compile: print('compiling network') self.network = torch.compile(self.network) @staticmethod def auto_detect_available_folds(model_training_output_dir, checkpoint_name): print('use_folds is None, attempting to auto detect available folds') fold_folders = subdirs(model_training_output_dir, prefix='fold_', join=False) fold_folders = [i for i in fold_folders if i != 'fold_all'] fold_folders = [i for i in fold_folders if isfile(join(model_training_output_dir, i, checkpoint_name))] use_folds = [int(i.split('_')[-1]) for i in fold_folders] print(f'found the following folds: {use_folds}') return use_folds def _manage_input_and_output_lists(self, list_of_lists_or_source_folder: Union[str, List[List[str]]], output_folder_or_list_of_truncated_output_files: Union[None, str, List[str]], folder_with_segs_from_prev_stage: str = None, overwrite: bool = True, part_id: int = 0, num_parts: int = 1, save_probabilities: bool = False): if isinstance(list_of_lists_or_source_folder, str): list_of_lists_or_source_folder = create_lists_from_splitted_dataset_folder(list_of_lists_or_source_folder, self.dataset_json['file_ending']) print(f'There are {len(list_of_lists_or_source_folder)} cases in the source folder') list_of_lists_or_source_folder = list_of_lists_or_source_folder[part_id::num_parts] caseids = [os.path.basename(i[0])[:-(len(self.dataset_json['file_ending']) + 5)] for i in list_of_lists_or_source_folder] print( f'I am process {part_id} out of {num_parts} (max process ID is {num_parts - 1}, we start counting with 0!)') print(f'There are {len(caseids)} cases that I would like to predict') if isinstance(output_folder_or_list_of_truncated_output_files, str): output_filename_truncated = [join(output_folder_or_list_of_truncated_output_files, i) for i in caseids] else: output_filename_truncated = output_folder_or_list_of_truncated_output_files seg_from_prev_stage_files = [join(folder_with_segs_from_prev_stage, i + self.dataset_json['file_ending']) if folder_with_segs_from_prev_stage is not None else None for i in caseids] # remove already predicted files form the lists if not overwrite and output_filename_truncated is not None: tmp = [isfile(i + self.dataset_json['file_ending']) for i in output_filename_truncated] if save_probabilities: tmp2 = [isfile(i + '.npz') for i in output_filename_truncated] tmp = [i and j for i, j in zip(tmp, tmp2)] not_existing_indices = [i for i, j in enumerate(tmp) if not j] output_filename_truncated = [output_filename_truncated[i] for i in not_existing_indices] list_of_lists_or_source_folder = [list_of_lists_or_source_folder[i] for i in not_existing_indices] seg_from_prev_stage_files = 
[seg_from_prev_stage_files[i] for i in not_existing_indices] print(f'overwrite was set to {overwrite}, so I am only working on cases that haven\'t been predicted yet. ' f'That\'s {len(not_existing_indices)} cases.') return list_of_lists_or_source_folder, output_filename_truncated, seg_from_prev_stage_files def predict_from_files(self, list_of_lists_or_source_folder: Union[str, List[List[str]]], output_folder_or_list_of_truncated_output_files: Union[str, None, List[str]], save_probabilities: bool = False, overwrite: bool = True, num_processes_preprocessing: int = default_num_processes, num_processes_segmentation_export: int = default_num_processes, folder_with_segs_from_prev_stage: str = None, num_parts: int = 1, part_id: int = 0): """ This is nnU-Net's default function for making predictions. It works best for batch predictions (predicting many images at once). """ if isinstance(output_folder_or_list_of_truncated_output_files, str): output_folder = output_folder_or_list_of_truncated_output_files elif isinstance(output_folder_or_list_of_truncated_output_files, list): output_folder = os.path.dirname(output_folder_or_list_of_truncated_output_files[0]) else: output_folder = None ######################## # let's store the input arguments so that its clear what was used to generate the prediction if output_folder is not None: my_init_kwargs = {} for k in inspect.signature(self.predict_from_files).parameters.keys(): my_init_kwargs[k] = locals()[k] my_init_kwargs = deepcopy( my_init_kwargs) # let's not unintentionally change anything in-place. Take this as a recursive_fix_for_json_export(my_init_kwargs) maybe_mkdir_p(output_folder) save_json(my_init_kwargs, join(output_folder, 'predict_from_raw_data_args.json')) # we need these two if we want to do things with the predictions like for example apply postprocessing save_json(self.dataset_json, join(output_folder, 'dataset.json'), sort_keys=False) save_json(self.plans_manager.plans, join(output_folder, 'plans.json'), sort_keys=False) ####################### # check if we need a prediction from the previous stage if self.configuration_manager.previous_stage_name is not None: assert folder_with_segs_from_prev_stage is not None, \ f'The requested configuration is a cascaded network. It requires the segmentations of the previous ' \ f'stage ({self.configuration_manager.previous_stage_name}) as input. 
Please provide the folder where' \ f' they are located via folder_with_segs_from_prev_stage' # sort out input and output filenames list_of_lists_or_source_folder, output_filename_truncated, seg_from_prev_stage_files = \ self._manage_input_and_output_lists(list_of_lists_or_source_folder, output_folder_or_list_of_truncated_output_files, folder_with_segs_from_prev_stage, overwrite, part_id, num_parts, save_probabilities) if len(list_of_lists_or_source_folder) == 0: return data_iterator = self._internal_get_data_iterator_from_lists_of_filenames(list_of_lists_or_source_folder, seg_from_prev_stage_files, output_filename_truncated, num_processes_preprocessing) return self.predict_from_data_iterator(data_iterator, save_probabilities, num_processes_segmentation_export) def _internal_get_data_iterator_from_lists_of_filenames(self, input_list_of_lists: List[List[str]], seg_from_prev_stage_files: Union[List[str], None], output_filenames_truncated: Union[List[str], None], num_processes: int): return preprocessing_iterator_fromfiles(input_list_of_lists, seg_from_prev_stage_files, output_filenames_truncated, self.plans_manager, self.dataset_json, self.configuration_manager, num_processes, self.device.type == 'cuda', self.verbose_preprocessing) # preprocessor = self.configuration_manager.preprocessor_class(verbose=self.verbose_preprocessing) # # hijack batchgenerators, yo # # we use the multiprocessing of the batchgenerators dataloader to handle all the background worker stuff. This # # way we don't have to reinvent the wheel here. # num_processes = max(1, min(num_processes, len(input_list_of_lists))) # ppa = PreprocessAdapter(input_list_of_lists, seg_from_prev_stage_files, preprocessor, # output_filenames_truncated, self.plans_manager, self.dataset_json, # self.configuration_manager, num_processes) # if num_processes == 0: # mta = SingleThreadedAugmenter(ppa, None) # else: # mta = MultiThreadedAugmenter(ppa, None, num_processes, 1, None, pin_memory=pin_memory) # return mta def get_data_iterator_from_raw_npy_data(self, image_or_list_of_images: Union[np.ndarray, List[np.ndarray]], segs_from_prev_stage_or_list_of_segs_from_prev_stage: Union[None, np.ndarray, List[ np.ndarray]], properties_or_list_of_properties: Union[dict, List[dict]], truncated_ofname: Union[str, List[str], None], num_processes: int = 3): list_of_images = [image_or_list_of_images] if not isinstance(image_or_list_of_images, list) else \ image_or_list_of_images if isinstance(segs_from_prev_stage_or_list_of_segs_from_prev_stage, np.ndarray): segs_from_prev_stage_or_list_of_segs_from_prev_stage = [ segs_from_prev_stage_or_list_of_segs_from_prev_stage] if isinstance(truncated_ofname, str): truncated_ofname = [truncated_ofname] if isinstance(properties_or_list_of_properties, dict): properties_or_list_of_properties = [properties_or_list_of_properties] num_processes = min(num_processes, len(list_of_images))
pp = preprocessing_iterator_fromnpy(
3
2023-12-04 19:43:14+00:00
16k
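`_manage_input_and_output_lists` above drops every case whose output file (and, with `save_probabilities`, its `.npz`) already exists when `overwrite` is disabled. The same filtering logic, reduced to plain Python with illustrative paths:

import os

def filter_unpredicted(truncated_outputs, file_ending=".nii.gz",
                       save_probabilities=False):
    # A case counts as done only if its segmentation (and, if requested,
    # the probability .npz) is already on disk.
    done = [os.path.isfile(p + file_ending) for p in truncated_outputs]
    if save_probabilities:
        done = [d and os.path.isfile(p + ".npz")
                for d, p in zip(done, truncated_outputs)]
    keep = [i for i, d in enumerate(done) if not d]
    return [truncated_outputs[i] for i in keep]

print(filter_unpredicted(["/tmp/case_000", "/tmp/case_001"]))  # both kept if nothing exists yet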
Zuricho/chroma_pipeline
chroma/models/graph_backbone.py
[ { "identifier": "validate_XC", "path": "chroma/data/xcs.py", "snippet": "def validate_XCS(all_atom=None, sequence=True):\n def decorator(func):\n def new_func(*args, **kwargs):" }, { "identifier": "basic", "path": "chroma/layers/basic.py", "snippet": "class NoOp(nn.Module):\ncl...
from types import SimpleNamespace from typing import Optional, Tuple, Union from chroma.data.xcs import validate_XC from chroma.layers import basic, graph from chroma.layers.structure import backbone, diffusion, transforms from chroma.models.graph_design import BackboneEncoderGNN from chroma.utility.model import load_model as utility_load_model from matplotlib import pyplot as plt import torch import torch.nn as nn
11,541
`layers.structure.diffusion.DiffusionChainCov` for more details on hyperparameters. Inputs: X (Tensor): Backbone coordinates with shape `(num_batch, num_residues, num_atoms, 3)`. C (LongTensor): Chain map with shape `(num_batch, num_residues)`. Outputs: neglogp (Tensor): Sum of `neglogp_S` and `neglogp_chi`. """ def __init__( self, dim_nodes: int = 128, dim_edges: int = 128, num_neighbors: int = 30, node_features: Tuple = (("internal_coords", {"log_lengths": True}),), edge_features: Tuple = ( "distances_2mer", "orientations_2mer", "distances_chain", ), num_layers: int = 3, dropout: float = 0.1, node_mlp_layers: int = 1, node_mlp_dim: Optional[int] = None, edge_update: bool = True, edge_mlp_layers: int = 1, edge_mlp_dim: Optional[int] = None, skip_connect_input: bool = False, mlp_activation: str = "softplus", decoder_num_hidden: int = 512, graph_criterion: str = "knn", graph_random_min_local: int = 20, backbone_update_method: str = "neighbor", backbone_update_iterations: int = 1, backbone_update_num_weights: int = 1, backbone_update_unconstrained: bool = True, use_time_features: bool = True, time_feature_type: str = "t", time_log_feature_scaling: float = 0.05, noise_schedule: str = "log_snr", noise_covariance_model: str = "brownian", noise_beta_min: float = 0.2, noise_beta_max: float = 70.0, noise_log_snr_range: Tuple[float] = (-7.0, 13.5), noise_complex_scaling: bool = False, loss_scale: float = 10.0, loss_scale_ssnr_cutoff: float = 0.99, loss_function: str = "squared_fape", checkpoint_gradients: bool = False, prediction_type: str = "X0", num_graph_cycles: int = 1, **kwargs, ): """Initialize GraphBackbone network.""" super(GraphBackbone, self).__init__() # Save configuration in kwargs self.kwargs = locals() self.kwargs.pop("self") for key in list(self.kwargs.keys()): if key.startswith("__") and key.endswith("__"): self.kwargs.pop(key) args = SimpleNamespace(**self.kwargs) # Important global options self.dim_nodes = args.dim_nodes self.dim_edges = args.dim_edges # Encoder GNN process backbone self.num_graph_cycles = args.num_graph_cycles self.encoders = nn.ModuleList( [ BackboneEncoderGNN( dim_nodes=args.dim_nodes, dim_edges=args.dim_edges, num_neighbors=args.num_neighbors, node_features=args.node_features, edge_features=args.edge_features, num_layers=args.num_layers, node_mlp_layers=args.node_mlp_layers, node_mlp_dim=args.node_mlp_dim, edge_update=args.edge_update, edge_mlp_layers=args.edge_mlp_layers, edge_mlp_dim=args.edge_mlp_dim, mlp_activation=args.mlp_activation, dropout=args.dropout, skip_connect_input=args.skip_connect_input, graph_criterion=args.graph_criterion, graph_random_min_local=args.graph_random_min_local, checkpoint_gradients=checkpoint_gradients, ) for i in range(self.num_graph_cycles) ] ) self.backbone_updates = nn.ModuleList( [ backbone.GraphBackboneUpdate( dim_nodes=args.dim_nodes, dim_edges=args.dim_edges, method=args.backbone_update_method, iterations=args.backbone_update_iterations, num_transform_weights=args.backbone_update_num_weights, unconstrained=args.backbone_update_unconstrained, ) for i in range(self.num_graph_cycles) ] ) self.use_time_features = args.use_time_features self.time_feature_type = args.time_feature_type self.time_log_feature_scaling = time_log_feature_scaling if self.use_time_features: self.time_features = basic.FourierFeaturization( d_input=1, d_model=dim_nodes, trainable=False, scale=16.0 )
# Copyright Generate Biomedicines, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Models for generating protein backbone structure via diffusion. """ class GraphBackbone(nn.Module): """Graph-based backbone generation for protein complexes. GraphBackbone parameterizes a generative model of the backbone coordinates of protein complexes. Args: See documention of `layers.structure.protein_graph.ProteinFeatureGraph`, `graph.GraphNN`, `layers.structure.backbone.GraphBackboneUpdate` and `layers.structure.diffusion.DiffusionChainCov` for more details on hyperparameters. Inputs: X (Tensor): Backbone coordinates with shape `(num_batch, num_residues, num_atoms, 3)`. C (LongTensor): Chain map with shape `(num_batch, num_residues)`. Outputs: neglogp (Tensor): Sum of `neglogp_S` and `neglogp_chi`. """ def __init__( self, dim_nodes: int = 128, dim_edges: int = 128, num_neighbors: int = 30, node_features: Tuple = (("internal_coords", {"log_lengths": True}),), edge_features: Tuple = ( "distances_2mer", "orientations_2mer", "distances_chain", ), num_layers: int = 3, dropout: float = 0.1, node_mlp_layers: int = 1, node_mlp_dim: Optional[int] = None, edge_update: bool = True, edge_mlp_layers: int = 1, edge_mlp_dim: Optional[int] = None, skip_connect_input: bool = False, mlp_activation: str = "softplus", decoder_num_hidden: int = 512, graph_criterion: str = "knn", graph_random_min_local: int = 20, backbone_update_method: str = "neighbor", backbone_update_iterations: int = 1, backbone_update_num_weights: int = 1, backbone_update_unconstrained: bool = True, use_time_features: bool = True, time_feature_type: str = "t", time_log_feature_scaling: float = 0.05, noise_schedule: str = "log_snr", noise_covariance_model: str = "brownian", noise_beta_min: float = 0.2, noise_beta_max: float = 70.0, noise_log_snr_range: Tuple[float] = (-7.0, 13.5), noise_complex_scaling: bool = False, loss_scale: float = 10.0, loss_scale_ssnr_cutoff: float = 0.99, loss_function: str = "squared_fape", checkpoint_gradients: bool = False, prediction_type: str = "X0", num_graph_cycles: int = 1, **kwargs, ): """Initialize GraphBackbone network.""" super(GraphBackbone, self).__init__() # Save configuration in kwargs self.kwargs = locals() self.kwargs.pop("self") for key in list(self.kwargs.keys()): if key.startswith("__") and key.endswith("__"): self.kwargs.pop(key) args = SimpleNamespace(**self.kwargs) # Important global options self.dim_nodes = args.dim_nodes self.dim_edges = args.dim_edges # Encoder GNN process backbone self.num_graph_cycles = args.num_graph_cycles self.encoders = nn.ModuleList( [ BackboneEncoderGNN( dim_nodes=args.dim_nodes, dim_edges=args.dim_edges, num_neighbors=args.num_neighbors, node_features=args.node_features, edge_features=args.edge_features, num_layers=args.num_layers, node_mlp_layers=args.node_mlp_layers, node_mlp_dim=args.node_mlp_dim, edge_update=args.edge_update, edge_mlp_layers=args.edge_mlp_layers, edge_mlp_dim=args.edge_mlp_dim, mlp_activation=args.mlp_activation, dropout=args.dropout, 
skip_connect_input=args.skip_connect_input, graph_criterion=args.graph_criterion, graph_random_min_local=args.graph_random_min_local, checkpoint_gradients=checkpoint_gradients, ) for i in range(self.num_graph_cycles) ] ) self.backbone_updates = nn.ModuleList( [ backbone.GraphBackboneUpdate( dim_nodes=args.dim_nodes, dim_edges=args.dim_edges, method=args.backbone_update_method, iterations=args.backbone_update_iterations, num_transform_weights=args.backbone_update_num_weights, unconstrained=args.backbone_update_unconstrained, ) for i in range(self.num_graph_cycles) ] ) self.use_time_features = args.use_time_features self.time_feature_type = args.time_feature_type self.time_log_feature_scaling = time_log_feature_scaling if self.use_time_features: self.time_features = basic.FourierFeaturization( d_input=1, d_model=dim_nodes, trainable=False, scale=16.0 )
self.noise_perturb = diffusion.DiffusionChainCov(
4
2023-11-28 00:09:40+00:00
16k
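`GraphBackbone.__init__` above snapshots its own constructor arguments by copying `locals()` before doing anything else and dropping `self` plus dunder entries; the saved dict makes the model re-instantiable from a config file. The pattern in isolation (the class itself is illustrative):

from types import SimpleNamespace

class Configurable:
    def __init__(self, dim_nodes: int = 128, dim_edges: int = 128, **kwargs):
        # Capture the arguments first, while locals() holds only parameters.
        self.kwargs = locals()
        self.kwargs.pop("self")
        for key in list(self.kwargs.keys()):
            if key.startswith("__") and key.endswith("__"):
                self.kwargs.pop(key)
        args = SimpleNamespace(**self.kwargs)
        self.dim_nodes = args.dim_nodes

m = Configurable(dim_nodes=64)
print(m.kwargs)  # {'dim_nodes': 64, 'dim_edges': 128, 'kwargs': {}}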
BiQiWHU/CMFormer
train_net.py
[ { "identifier": "add_maskformer2_config", "path": "mask2former/config.py", "snippet": "def add_maskformer2_config(cfg):\n \"\"\"\n Add config for MASK_FORMER.\n \"\"\"\n # NOTE: configs from original maskformer\n # data config\n # select the dataset mapper\n cfg.INPUT.DATASET_MAPPER...
from shapely.errors import ShapelyDeprecationWarning from collections import OrderedDict from typing import Any, Dict, List, Set from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg from detectron2.data import MetadataCatalog, build_detection_train_loader from detectron2.engine import ( DefaultTrainer, default_argument_parser, default_setup, launch, ) from detectron2.evaluation import ( CityscapesInstanceEvaluator, CityscapesSemSegEvaluator, COCOEvaluator, COCOPanopticEvaluator, DatasetEvaluators, LVISEvaluator, SemSegEvaluator, verify_results, ) from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler from detectron2.solver.build import maybe_add_gradient_clipping from detectron2.utils.logger import setup_logger from mask2former import ( COCOInstanceNewBaselineDatasetMapper, COCOPanopticNewBaselineDatasetMapper, InstanceSegEvaluator, MaskFormerInstanceDatasetMapper, MaskFormerPanopticDatasetMapper, MaskFormerSemanticDatasetMapper, SemanticSegmentorWithTTA, add_maskformer2_config, ) import warnings import copy import itertools import logging import os import torch import detectron2.utils.comm as comm
11,412
    warnings.filterwarnings('ignore', category=ShapelyDeprecationWarning)
except:
    pass

os.environ['DETECTRON2_DATASETS'] = 'E:/DGtask/datasets'


# MaskFormer
class Trainer(DefaultTrainer):
    """
    Extension of the Trainer class adapted to MaskFormer.
    """

    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        """
        Create evaluator(s) for a given dataset.
        This uses the special metadata "evaluator_type" associated with each builtin dataset.
        For your own dataset, you can simply create an evaluator manually in your
        script and do not have to worry about the hacky if-else logic here.
        """
        if output_folder is None:
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
        evaluator_list = []
        evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
        # semantic segmentation
        if evaluator_type in ["sem_seg", "ade20k_panoptic_seg"]:
            evaluator_list.append(
                SemSegEvaluator(
                    dataset_name,
                    distributed=True,
                    output_dir=output_folder,
                )
            )
        # instance segmentation
        if evaluator_type == "coco":
            evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
        # panoptic segmentation
        if evaluator_type in [
            "coco_panoptic_seg",
            "ade20k_panoptic_seg",
            "cityscapes_panoptic_seg",
            "mapillary_vistas_panoptic_seg",
        ]:
            if cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON:
                evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
        # COCO
        if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON:
            evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
        if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON:
            evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder))
        # Mapillary Vistas
        if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON:
            evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder))
        if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON:
            evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder))
        # Cityscapes
        if evaluator_type == "cityscapes_instance":
            assert (
                torch.cuda.device_count() > comm.get_rank()
            ), "CityscapesEvaluator currently do not work with multiple machines."
            return CityscapesInstanceEvaluator(dataset_name)
        if evaluator_type == "cityscapes_sem_seg":
            assert (
                torch.cuda.device_count() > comm.get_rank()
            ), "CityscapesEvaluator currently do not work with multiple machines."
            return CityscapesSemSegEvaluator(dataset_name)
        if evaluator_type == "cityscapes_panoptic_seg":
            if cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON:
                assert (
                    torch.cuda.device_count() > comm.get_rank()
                ), "CityscapesEvaluator currently do not work with multiple machines."
                evaluator_list.append(CityscapesSemSegEvaluator(dataset_name))
            if cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON:
                assert (
                    torch.cuda.device_count() > comm.get_rank()
                ), "CityscapesEvaluator currently do not work with multiple machines."
                evaluator_list.append(CityscapesInstanceEvaluator(dataset_name))
        # ADE20K
        if evaluator_type == "ade20k_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON:
            evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder))
        # LVIS
        if evaluator_type == "lvis":
            return LVISEvaluator(dataset_name, output_dir=output_folder)
        if len(evaluator_list) == 0:
            raise NotImplementedError(
                "no Evaluator for the dataset {} with the type {}".format(
                    dataset_name, evaluator_type
                )
            )
        elif len(evaluator_list) == 1:
            return evaluator_list[0]
        return DatasetEvaluators(evaluator_list)

    @classmethod
    def build_train_loader(cls, cfg):
        # Semantic segmentation dataset mapper
        if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic":
            mapper = MaskFormerSemanticDatasetMapper(cfg, True)
            return build_detection_train_loader(cfg, mapper=mapper)
        # Panoptic segmentation dataset mapper
        elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_panoptic":
            mapper = MaskFormerPanopticDatasetMapper(cfg, True)
            return build_detection_train_loader(cfg, mapper=mapper)
        # Instance segmentation dataset mapper
        elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_instance":
            mapper = MaskFormerInstanceDatasetMapper(cfg, True)
            return build_detection_train_loader(cfg, mapper=mapper)
        # coco instance segmentation lsj new baseline
        elif cfg.INPUT.DATASET_MAPPER_NAME == "coco_instance_lsj":
            mapper = COCOInstanceNewBaselineDatasetMapper(cfg, True)
            return build_detection_train_loader(cfg, mapper=mapper)
        # coco panoptic segmentation lsj new baseline
        elif cfg.INPUT.DATASET_MAPPER_NAME == "coco_panoptic_lsj":
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
MaskFormer Training Script.

This script is a simplified version of the training script in detectron2/tools.
"""
try:
    # ignore ShapelyDeprecationWarning from fvcore
    warnings.filterwarnings('ignore', category=ShapelyDeprecationWarning)
except:
    pass

os.environ['DETECTRON2_DATASETS'] = 'E:/DGtask/datasets'


# MaskFormer
class Trainer(DefaultTrainer):
    """
    Extension of the Trainer class adapted to MaskFormer.
    """

    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        """
        Create evaluator(s) for a given dataset.
        This uses the special metadata "evaluator_type" associated with each builtin dataset.
        For your own dataset, you can simply create an evaluator manually in your
        script and do not have to worry about the hacky if-else logic here.
        """
        if output_folder is None:
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
        evaluator_list = []
        evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
        # semantic segmentation
        if evaluator_type in ["sem_seg", "ade20k_panoptic_seg"]:
            evaluator_list.append(
                SemSegEvaluator(
                    dataset_name,
                    distributed=True,
                    output_dir=output_folder,
                )
            )
        # instance segmentation
        if evaluator_type == "coco":
            evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
        # panoptic segmentation
        if evaluator_type in [
            "coco_panoptic_seg",
            "ade20k_panoptic_seg",
            "cityscapes_panoptic_seg",
            "mapillary_vistas_panoptic_seg",
        ]:
            if cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON:
                evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
        # COCO
        if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON:
            evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
        if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON:
            evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder))
        # Mapillary Vistas
        if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON:
            evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder))
        if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON:
            evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder))
        # Cityscapes
        if evaluator_type == "cityscapes_instance":
            assert (
                torch.cuda.device_count() > comm.get_rank()
            ), "CityscapesEvaluator currently do not work with multiple machines."
            return CityscapesInstanceEvaluator(dataset_name)
        if evaluator_type == "cityscapes_sem_seg":
            assert (
                torch.cuda.device_count() > comm.get_rank()
            ), "CityscapesEvaluator currently do not work with multiple machines."
            return CityscapesSemSegEvaluator(dataset_name)
        if evaluator_type == "cityscapes_panoptic_seg":
            if cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON:
                assert (
                    torch.cuda.device_count() > comm.get_rank()
                ), "CityscapesEvaluator currently do not work with multiple machines."
                evaluator_list.append(CityscapesSemSegEvaluator(dataset_name))
            if cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON:
                assert (
                    torch.cuda.device_count() > comm.get_rank()
                ), "CityscapesEvaluator currently do not work with multiple machines."
                evaluator_list.append(CityscapesInstanceEvaluator(dataset_name))
        # ADE20K
        if evaluator_type == "ade20k_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON:
            evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder))
        # LVIS
        if evaluator_type == "lvis":
            return LVISEvaluator(dataset_name, output_dir=output_folder)
        if len(evaluator_list) == 0:
            raise NotImplementedError(
                "no Evaluator for the dataset {} with the type {}".format(
                    dataset_name, evaluator_type
                )
            )
        elif len(evaluator_list) == 1:
            return evaluator_list[0]
        return DatasetEvaluators(evaluator_list)

    @classmethod
    def build_train_loader(cls, cfg):
        # Semantic segmentation dataset mapper
        if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic":
            mapper = MaskFormerSemanticDatasetMapper(cfg, True)
            return build_detection_train_loader(cfg, mapper=mapper)
        # Panoptic segmentation dataset mapper
        elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_panoptic":
            mapper = MaskFormerPanopticDatasetMapper(cfg, True)
            return build_detection_train_loader(cfg, mapper=mapper)
        # Instance segmentation dataset mapper
        elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_instance":
            mapper = MaskFormerInstanceDatasetMapper(cfg, True)
            return build_detection_train_loader(cfg, mapper=mapper)
        # coco instance segmentation lsj new baseline
        elif cfg.INPUT.DATASET_MAPPER_NAME == "coco_instance_lsj":
            mapper = COCOInstanceNewBaselineDatasetMapper(cfg, True)
            return build_detection_train_loader(cfg, mapper=mapper)
        # coco panoptic segmentation lsj new baseline
        elif cfg.INPUT.DATASET_MAPPER_NAME == "coco_panoptic_lsj":
mapper = COCOPanopticNewBaselineDatasetMapper(cfg, True)
2
2023-11-29 15:26:53+00:00
16k
PopicLab/insilicoSV
test/test_processing.py
[ { "identifier": "SV_Simulator", "path": "insilicosv/simulate.py", "snippet": "class SV_Simulator:\n def __init__(self, par_file, log_file=None):\n \"\"\"\n par_file: file location to configuration file (.yaml)\n log_file: location to store log file with diagnostic information if ...
from insilicosv.simulate import SV_Simulator
from insilicosv.processing import FormatterIO
from test_simulate import TestObject
from pysam import VariantFile, FastaFile
from collections import defaultdict, Counter
from insilicosv.utils import NestedDict
from insilicosv import utils
from insilicosv import constants
import unittest
import sys
import os
14,130
        self.test_objects_overlap_simple = {
            'overlap1': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {
                "sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True},
                "overlap_events": {"bed": [self.test_overlap_bed, self.test_overlap_bed_2], "allow_types": ["L1HS", "ALR/Alpha"]},
                "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [5], "num_overlap": [2, 1]}]}],
                self.hap1, self.hap2, self.bed, self.vcf),
            'overlap2': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {
                "sim_settings": {"reference": self.ref_file, "prioritize_top": True},
                "overlap_events": {"bed": [self.test_overlap_bed, self.test_overlap_bed_2], "allow_types": ["L1HS", "ALR/Alpha"]},
                "variant_sets": [{"type": "DEL", "number": 4, "min_length": [1], "max_length": [5], "num_overlap": [3, 1]}]}],
                self.hap1, self.hap2, self.bed, self.vcf),
            'overlap3': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {
                "sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True},
                "overlap_events": {"bed": [self.test_overlap_bed, self.test_overlap_bed_2], "allow_types": ["L1", "ALR"]},
                "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [5], "num_overlap": [3, 2]}]}],
                self.hap1, self.hap2, self.bed, self.vcf),
            'overlap4': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {
                "sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True},
                "overlap_events": {"bed": [self.test_overlap_bed, self.test_overlap_bed_2], "allow_types": "L1"},
                "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [5], "num_overlap": 2}]}],
                self.hap1, self.hap2, self.bed, self.vcf),
            'overlap5': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {
                "sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True},
                "overlap_events": {"bed": self.test_overlap_bed_3, "allow_types": "ALR"},
                "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [5], "num_overlap": 2}]}],
                self.hap1, self.hap2, self.bed, self.vcf),
            'overlap6': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {
                "sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True},
                "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 'L1', 'L2', 'SVA', 'HERVK']},
                "variant_sets": [{"type": "DEL", "number": 5, "min_length": [2], "max_length": [4], "num_overlap": [1, 1, 1, 1, 1]},
                                 {"type": "DEL", "number": 5, "min_length": [6], "max_length": [8], "num_overlap": [1, 1, 1, 1, 1]}]}],
                self.hap1, self.hap2, self.bed, self.vcf),
            'overlap7': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {
                "sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True},
                "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 'L1', 'L2', 'SVA', 'HERVK']},
                "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [1], "num_partial_overlap": [1, 1, 1, 1, 1]},
                                 {"type": "DEL", "number": 5, "min_length": [2], "max_length": [2], "num_partial_overlap": [1, 1, 1, 1, 1]}]}],
                self.hap1, self.hap2, self.bed, self.vcf),
            'overlap8': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {
                "sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True},
                "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 'L1', 'L2', 'SVA', 'HERVK']},
                "variant_sets": [{"type": "dDUP", "number": 5, "min_length": [2, 1], "max_length": [4, 1], "num_overlap": [1, 1, 1, 1, 1]},
                                 {"type": "dDUP", "number": 5, "min_length": [6, 1], "max_length": [8, 1], "num_overlap": [1, 1, 1, 1, 1]}]}],
                self.hap1, self.hap2, self.bed, self.vcf),
            'overlap9': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {
                "sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True},
                "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 'L1', 'L2', 'SVA', 'HERVK']},
                "variant_sets": [{"type": "dDUP", "number": 5, "min_length": [1, 1], "max_length": [1, 1], "num_partial_overlap": [1, 1, 1, 1, 1]},
                                 {"type": "dDUP", "number": 5, "min_length": [1, 1], "max_length": [2, 1], "num_partial_overlap": [1, 1, 1, 1, 1]}]}],
                self.hap1, self.hap2, self.bed, self.vcf)}

        self.test_objects_alu_mediated = {
            'alu_med1': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {
                "sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True},
                "overlap_events": {"bed": self.test_overlap_bed_4},
                "variant_sets": [{"type": "DEL", "number": 1, "min_length": [13], "max_length": [15], "num_alu_mediated": 1}]}],
                self.hap1, self.hap2, self.bed, self.vcf)}

        self.formatter = FormatterIO(self.par)

    def tearDown(self):
class TestProcObject(TestObject):
    def __init__(self, ref, par, hap1, hap2, bed, vcf):
        self.vcf = vcf
        super().__init__(ref, par, hap1, hap2, bed)

    def extract_bed_records(self):
        # parse bed record into dict for easy comparison
        # --> example split bed record: ['chr19', '0', '3', 'chr19', '0', '3', 'DEL', '3', '1/1', 'DEL', '1']
        bed_records = []
        with open(self.bed) as f:
            for line in f:
                ln = line.split()
                bed_record = {'source_chr': ln[0], 'source_s': ln[1], 'source_e': ln[2],
                              'target_chr': ln[3], 'target_s': ln[4], 'target_e': ln[5],
                              'ev_type': ln[6], 'len': ln[7], 'zyg': ln[8],
                              'parent_type': ln[9], 'sv_id': ln[10]}
                bed_records.append(bed_record)
        return bed_records

    def extract_vcf_records(self):
        vcf_records = []
        vcf = VariantFile(self.vcf)
        for rec in vcf.fetch():
            ln = str(rec).split()
            # separately parse info field of the form: 'END=45590417;SVTYPE=dDUP;SVLEN=539;TARGET=45581738'
            info = {field.split('=')[0]: field.split('=')[1] for field in ln[7].split(';')}
            vcf_record = {'CHROM': ln[0], 'POS': ln[1], 'ID': ln[2], 'REF': ln[3], 'ALT': ln[4],
                          'QUAL': ln[5], 'FILTER': ln[6], 'INFO': info, 'FORMAT': ln[8], 'SAMPLE': ln[9]}
            vcf_records.append(vcf_record)
        return vcf_records


class TestProcessing(unittest.TestCase):
    def setUp(self):
        # runs before every test
        self.ref_file = "test/inputs/test.fa"
        self.par = "test/inputs/par.yaml"
        self.hap1 = "test/inputs/test1.fa"
        self.hap2 = "test/inputs/test2.fa"
        self.bed = "test/inputs/out.bed"
        self.vcf = "test/inputs/out.vcf"
        self.ins_fasta = "test/inputs/ins_fasta.fa"
        self.test_overlap_bed = "test/inputs/example_overlap_events.bed"
        self.test_overlap_bed_2 = "test/inputs/example_overlap_events_2.bed"
        # test_overlap_bed_3: events with differing chromosome
        self.test_overlap_bed_3 = "test/inputs/example_overlap_events_3.bed"
        self.test_overlap_bed_4 = "test/inputs/example_overlap_events_4.bed"
        self.test_overlap_bed_11 = "test/inputs/example_overlap_events_11.bed"

        self.test_objects_simple_events = {
            'DEL': TestProcObject([self.ref_file, {"chr19": "CTG"}], [self.par, {
                "sim_settings": {"reference": self.ref_file, "max_tries": 50, "prioritize_top": True},
                "variant_sets": [{"type": "DEL", "number": 1, "max_length": [3], "min_length": [3]}]}],
                self.hap1, self.hap2, self.bed, self.vcf),
            'DUP': TestProcObject([self.ref_file, {"chr19": "CTG"}], [self.par, {
                "sim_settings": {"reference": self.ref_file, "max_tries": 50, "prioritize_top": True},
                "variant_sets": [{"type": "DUP", "number": 1, "max_length": [3], "min_length": [3]}]}],
                self.hap1, self.hap2, self.bed, self.vcf),
            'INV': TestProcObject([self.ref_file, {"chr19": "CTG"}], [self.par, {
                "sim_settings": {"reference": self.ref_file, "max_tries": 50, "prioritize_top": True},
                "variant_sets": [{"type": "INV", "number": 1, "max_length": [3], "min_length": [3]}]}],
                self.hap1, self.hap2, self.bed, self.vcf),
            'INS': TestProcObject([self.ref_file, {"chr19": "C"}], [self.par, {
                "sim_settings": {"reference": self.ref_file, "max_tries": 50, "prioritize_top": True},
                "variant_sets": [{"type": "INS", "number": 1, "max_length": [3], "min_length": [3]}]}],
                self.hap1, self.hap2, self.bed, self.vcf)}

        self.test_objects_flanked_inversions = {
            'dupINVdup': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {
                "sim_settings": {"reference": self.ref_file, "prioritize_top": True},
                "variant_sets": [{"type": "dupINVdup", "number": 1, "max_length": [2, 2, 2], "min_length": [2, 2, 2]}]}],
                self.hap1, self.hap2, self.bed, self.vcf),
            'delINVdel': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {
                "sim_settings": {"reference": self.ref_file, "prioritize_top": True},
                "variant_sets": [{"type": "delINVdel", "number": 1, "max_length": [2, 2, 2], "min_length": [2, 2, 2]}]}],
                self.hap1, self.hap2, self.bed, self.vcf),
            'dupINVdel': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {
                "sim_settings": {"reference": self.ref_file, "prioritize_top": True},
                "variant_sets": [{"type": "dupINVdel", "number": 1, "max_length": [2, 2, 2], "min_length": [2, 2, 2]}]}],
                self.hap1, self.hap2, self.bed, self.vcf),
            'delINVdup': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {
                "sim_settings": {"reference": self.ref_file, "prioritize_top": True},
                "variant_sets": [{"type": "delINVdup", "number": 1, "max_length": [2, 2, 2], "min_length": [2, 2, 2]}]}],
                self.hap1, self.hap2, self.bed, self.vcf)}

        self.test_objects_dispersions = {
            'dDUP': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {
                "sim_settings": {"reference": self.ref_file, "prioritize_top": True},
                "variant_sets": [{"type": "dDUP", "number": 1, "max_length": [3, 3], "min_length": [3, 3]}]}],
                self.hap1, self.hap2, self.bed, self.vcf),
            'INV_dDUP': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {
                "sim_settings": {"reference": self.ref_file, "prioritize_top": True},
                "variant_sets": [{"type": "INV_dDUP", "number": 1, "max_length": [3, 3], "min_length": [3, 3]}]}],
                self.hap1, self.hap2, self.bed, self.vcf),
            'TRA': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {
                "sim_settings": {"reference": self.ref_file, "prioritize_top": True},
                "variant_sets": [{"type": "TRA", "number": 1, "max_length": [3, 3], "min_length": [3, 3]}]}],
                self.hap1, self.hap2, self.bed, self.vcf)}

        self.test_objects_del_inv = {
            'delINV': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {
                "sim_settings": {"reference": self.ref_file, "prioritize_top": True},
                "variant_sets": [{"type": "delINV", "number": 1, "max_length": [3, 3], "min_length": [3, 3]}]}],
                self.hap1, self.hap2, self.bed, self.vcf),
            'INVdel': TestProcObject([self.ref_file, {"chr19": "ACTGTC"}], [self.par, {
                "sim_settings": {"reference": self.ref_file, "prioritize_top": True},
                "variant_sets": [{"type": "INVdel", "number": 1, "max_length": [3, 3], "min_length": [3, 3]}]}],
                self.hap1, self.hap2, self.bed, self.vcf)}

        self.test_objects_idel = {
            'dDUP_iDEL': TestProcObject([self.ref_file, {"chr19": "ACTGTCAG"}], [self.par, {
                "sim_settings": {"reference": self.ref_file, "prioritize_top": True},
                "variant_sets": [{"type": "dDUP_iDEL", "number": 1, "max_length": [3, 3, 2], "min_length": [3, 3, 2]}]}],
                self.hap1, self.hap2, self.bed, self.vcf),
            'INS_iDEL': TestProcObject([self.ref_file, {"chr19": "ACTGTCAG"}], [self.par, {
                "sim_settings": {"reference": self.ref_file, "prioritize_top": True},
                "variant_sets": [{"type": "INS_iDEL", "number": 1, "max_length": [3, 3, 2], "min_length": [3, 3, 2]}]}],
                self.hap1, self.hap2, self.bed, self.vcf)}

        self.test_objects_dup_inv = {
            'dup_INV': TestProcObject([self.ref_file, {"chr19": "ACTGTCAG"}], [self.par, {
                "sim_settings": {"reference": self.ref_file, "prioritize_top": True},
                "variant_sets": [{"type": "dup_INV", "number": 1, "max_length": [4, 4], "min_length": [4, 4]}]}],
                self.hap1, self.hap2, self.bed, self.vcf),
            'INV_dup': TestProcObject([self.ref_file, {"chr19": "ACTGTCAG"}], [self.par, {
                "sim_settings": {"reference": self.ref_file, "prioritize_top": True},
                "variant_sets": [{"type": "INV_dup", "number": 1, "max_length": [4, 4], "min_length": [4, 4]}]}],
                self.hap1, self.hap2, self.bed, self.vcf)}

        self.test_objects_INVdup = {
            'INVdup': TestProcObject([self.ref_file, {"chr19": "ACTG"}], [self.par, {
                "sim_settings": {"reference": self.ref_file, "prioritize_top": True},
                "variant_sets": [{"type": "INVdup", "number": 1, "max_length": [4], "min_length": [4]}]}],
                self.hap1, self.hap2, self.bed, self.vcf)}

        self.test_objects_multievent = {
            'INVdup': TestProcObject([self.ref_file, {"chr19": "ACTGCTAATGCGTTCACTGCTAATGCGTTC"}], [self.par, {
                "sim_settings": {"reference": self.ref_file, "max_tries": 200, "prioritize_top": True},
                "variant_sets": [{"type": "INVdup", "number": 3, "max_length": [4], "min_length": [2]}]}],
                self.hap1, self.hap2, self.bed, self.vcf)}

        self.test_objects_overlap_simple = {
            'overlap1': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {
                "sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True},
                "overlap_events": {"bed": [self.test_overlap_bed, self.test_overlap_bed_2], "allow_types": ["L1HS", "ALR/Alpha"]},
                "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [5], "num_overlap": [2, 1]}]}],
                self.hap1, self.hap2, self.bed, self.vcf),
            'overlap2': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {
                "sim_settings": {"reference": self.ref_file, "prioritize_top": True},
                "overlap_events": {"bed": [self.test_overlap_bed, self.test_overlap_bed_2], "allow_types": ["L1HS", "ALR/Alpha"]},
                "variant_sets": [{"type": "DEL", "number": 4, "min_length": [1], "max_length": [5], "num_overlap": [3, 1]}]}],
                self.hap1, self.hap2, self.bed, self.vcf),
            'overlap3': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {
                "sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True},
                "overlap_events": {"bed": [self.test_overlap_bed, self.test_overlap_bed_2], "allow_types": ["L1", "ALR"]},
                "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [5], "num_overlap": [3, 2]}]}],
                self.hap1, self.hap2, self.bed, self.vcf),
            'overlap4': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {
                "sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True},
                "overlap_events": {"bed": [self.test_overlap_bed, self.test_overlap_bed_2], "allow_types": "L1"},
                "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [5], "num_overlap": 2}]}],
                self.hap1, self.hap2, self.bed, self.vcf),
            'overlap5': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {
                "sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True},
                "overlap_events": {"bed": self.test_overlap_bed_3, "allow_types": "ALR"},
                "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [5], "num_overlap": 2}]}],
                self.hap1, self.hap2, self.bed, self.vcf),
            'overlap6': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {
                "sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True},
                "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 'L1', 'L2', 'SVA', 'HERVK']},
                "variant_sets": [{"type": "DEL", "number": 5, "min_length": [2], "max_length": [4], "num_overlap": [1, 1, 1, 1, 1]},
                                 {"type": "DEL", "number": 5, "min_length": [6], "max_length": [8], "num_overlap": [1, 1, 1, 1, 1]}]}],
                self.hap1, self.hap2, self.bed, self.vcf),
            'overlap7': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {
                "sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True},
                "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 'L1', 'L2', 'SVA', 'HERVK']},
                "variant_sets": [{"type": "DEL", "number": 5, "min_length": [1], "max_length": [1], "num_partial_overlap": [1, 1, 1, 1, 1]},
                                 {"type": "DEL", "number": 5, "min_length": [2], "max_length": [2], "num_partial_overlap": [1, 1, 1, 1, 1]}]}],
                self.hap1, self.hap2, self.bed, self.vcf),
            'overlap8': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {
                "sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True},
                "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 'L1', 'L2', 'SVA', 'HERVK']},
                "variant_sets": [{"type": "dDUP", "number": 5, "min_length": [2, 1], "max_length": [4, 1], "num_overlap": [1, 1, 1, 1, 1]},
                                 {"type": "dDUP", "number": 5, "min_length": [6, 1], "max_length": [8, 1], "num_overlap": [1, 1, 1, 1, 1]}]}],
                self.hap1, self.hap2, self.bed, self.vcf),
            'overlap9': TestProcObject([self.ref_file, {"chr21": "CCTCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTATCCGTCGTACTAAGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {
                "sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True},
                "overlap_events": {"bed": self.test_overlap_bed_11, "allow_types": ['Alu', 'L1', 'L2', 'SVA', 'HERVK']},
                "variant_sets": [{"type": "dDUP", "number": 5, "min_length": [1, 1], "max_length": [1, 1], "num_partial_overlap": [1, 1, 1, 1, 1]},
                                 {"type": "dDUP", "number": 5, "min_length": [1, 1], "max_length": [2, 1], "num_partial_overlap": [1, 1, 1, 1, 1]}]}],
                self.hap1, self.hap2, self.bed, self.vcf)}

        self.test_objects_alu_mediated = {
            'alu_med1': TestProcObject([self.ref_file, {"chr21": "CTCCGTCGTACTAAGTCGTACTCCGTCGTACTAAGTCGTA"}], [self.par, {
                "sim_settings": {"reference": self.ref_file, "prioritize_top": True, "fail_if_placement_issues": True},
                "overlap_events": {"bed": self.test_overlap_bed_4},
                "variant_sets": [{"type": "DEL", "number": 1, "min_length": [13], "max_length": [15], "num_alu_mediated": 1}]}],
                self.hap1, self.hap2, self.bed, self.vcf)}

        self.formatter = FormatterIO(self.par)

    def tearDown(self):
utils.remove_file(self.ins_fasta)
3
2023-12-01 14:39:20+00:00
16k
BiQiWHU/BWG
train_net.py
[ { "identifier": "add_maskformer2_config", "path": "mask2former/config.py", "snippet": "def add_maskformer2_config(cfg):\n \"\"\"\n Add config for MASK_FORMER.\n \"\"\"\n # NOTE: configs from original maskformer\n # data config\n # select the dataset mapper\n cfg.INPUT.DATASET_MAPPER...
from shapely.errors import ShapelyDeprecationWarning
from collections import OrderedDict
from typing import Any, Dict, List, Set
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog, build_detection_train_loader
from detectron2.engine import (
    DefaultTrainer,
    default_argument_parser,
    default_setup,
    launch,
)
from detectron2.evaluation import (
    CityscapesInstanceEvaluator,
    CityscapesSemSegEvaluator,
    COCOEvaluator,
    COCOPanopticEvaluator,
    DatasetEvaluators,
    LVISEvaluator,
    SemSegEvaluator,
    verify_results,
)
from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler
from detectron2.solver.build import maybe_add_gradient_clipping
from detectron2.utils.logger import setup_logger
from mask2former import (
    COCOInstanceNewBaselineDatasetMapper,
    COCOPanopticNewBaselineDatasetMapper,
    InstanceSegEvaluator,
    MaskFormerInstanceDatasetMapper,
    MaskFormerPanopticDatasetMapper,
    MaskFormerSemanticDatasetMapper,
    SemanticSegmentorWithTTA,
    add_maskformer2_config,
)
import warnings
import copy
import itertools
import logging
import os
import torch
import detectron2.utils.comm as comm
11,304
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
MaskFormer Training Script.

This script is a simplified version of the training script in detectron2/tools.
"""
try:
    # ignore ShapelyDeprecationWarning from fvcore
    warnings.filterwarnings('ignore', category=ShapelyDeprecationWarning)
except:
    pass

os.environ['DETECTRON2_DATASETS'] = 'E:/DGtask/datasets'


# MaskFormer
class Trainer(DefaultTrainer):
    """
    Extension of the Trainer class adapted to MaskFormer.
    """

    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        """
        Create evaluator(s) for a given dataset.
        This uses the special metadata "evaluator_type" associated with each builtin dataset.
        For your own dataset, you can simply create an evaluator manually in your
        script and do not have to worry about the hacky if-else logic here.
        """
        if output_folder is None:
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
        evaluator_list = []
        evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
        # semantic segmentation
        if evaluator_type in ["sem_seg", "ade20k_panoptic_seg"]:
            evaluator_list.append(
                SemSegEvaluator(
                    dataset_name,
                    distributed=True,
                    output_dir=output_folder,
                )
            )
        # instance segmentation
        if evaluator_type == "coco":
            evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
        # panoptic segmentation
        if evaluator_type in [
            "coco_panoptic_seg",
            "ade20k_panoptic_seg",
            "cityscapes_panoptic_seg",
            "mapillary_vistas_panoptic_seg",
        ]:
            if cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON:
                evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
        # COCO
        if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON:
            evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
        if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON:
            evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder))
        # Mapillary Vistas
        if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON:
            evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder))
        if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON:
            evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder))
        # Cityscapes
        if evaluator_type == "cityscapes_instance":
            assert (
                torch.cuda.device_count() > comm.get_rank()
            ), "CityscapesEvaluator currently do not work with multiple machines."
            return CityscapesInstanceEvaluator(dataset_name)
        if evaluator_type == "cityscapes_sem_seg":
            assert (
                torch.cuda.device_count() > comm.get_rank()
            ), "CityscapesEvaluator currently do not work with multiple machines."
            return CityscapesSemSegEvaluator(dataset_name)
        if evaluator_type == "cityscapes_panoptic_seg":
            if cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON:
                assert (
                    torch.cuda.device_count() > comm.get_rank()
                ), "CityscapesEvaluator currently do not work with multiple machines."
                evaluator_list.append(CityscapesSemSegEvaluator(dataset_name))
            if cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON:
                assert (
                    torch.cuda.device_count() > comm.get_rank()
                ), "CityscapesEvaluator currently do not work with multiple machines."
                evaluator_list.append(CityscapesInstanceEvaluator(dataset_name))
        # ADE20K
        if evaluator_type == "ade20k_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON:
            evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder))
        # LVIS
        if evaluator_type == "lvis":
            return LVISEvaluator(dataset_name, output_dir=output_folder)
        if len(evaluator_list) == 0:
            raise NotImplementedError(
                "no Evaluator for the dataset {} with the type {}".format(
                    dataset_name, evaluator_type
                )
            )
        elif len(evaluator_list) == 1:
            return evaluator_list[0]
        return DatasetEvaluators(evaluator_list)

    @classmethod
    def build_train_loader(cls, cfg):
        # Semantic segmentation dataset mapper
        if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic":
            mapper = MaskFormerSemanticDatasetMapper(cfg, True)
            return build_detection_train_loader(cfg, mapper=mapper)
        # Panoptic segmentation dataset mapper
        elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_panoptic":
            mapper = MaskFormerPanopticDatasetMapper(cfg, True)
            return build_detection_train_loader(cfg, mapper=mapper)
        # Instance segmentation dataset mapper
        elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_instance":
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
MaskFormer Training Script.

This script is a simplified version of the training script in detectron2/tools.
"""
try:
    # ignore ShapelyDeprecationWarning from fvcore
    warnings.filterwarnings('ignore', category=ShapelyDeprecationWarning)
except:
    pass

os.environ['DETECTRON2_DATASETS'] = 'E:/DGtask/datasets'


# MaskFormer
class Trainer(DefaultTrainer):
    """
    Extension of the Trainer class adapted to MaskFormer.
    """

    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        """
        Create evaluator(s) for a given dataset.
        This uses the special metadata "evaluator_type" associated with each builtin dataset.
        For your own dataset, you can simply create an evaluator manually in your
        script and do not have to worry about the hacky if-else logic here.
        """
        if output_folder is None:
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
        evaluator_list = []
        evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
        # semantic segmentation
        if evaluator_type in ["sem_seg", "ade20k_panoptic_seg"]:
            evaluator_list.append(
                SemSegEvaluator(
                    dataset_name,
                    distributed=True,
                    output_dir=output_folder,
                )
            )
        # instance segmentation
        if evaluator_type == "coco":
            evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
        # panoptic segmentation
        if evaluator_type in [
            "coco_panoptic_seg",
            "ade20k_panoptic_seg",
            "cityscapes_panoptic_seg",
            "mapillary_vistas_panoptic_seg",
        ]:
            if cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON:
                evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))
        # COCO
        if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON:
            evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder))
        if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON:
            evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder))
        # Mapillary Vistas
        if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON:
            evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder))
        if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON:
            evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder))
        # Cityscapes
        if evaluator_type == "cityscapes_instance":
            assert (
                torch.cuda.device_count() > comm.get_rank()
            ), "CityscapesEvaluator currently do not work with multiple machines."
            return CityscapesInstanceEvaluator(dataset_name)
        if evaluator_type == "cityscapes_sem_seg":
            assert (
                torch.cuda.device_count() > comm.get_rank()
            ), "CityscapesEvaluator currently do not work with multiple machines."
            return CityscapesSemSegEvaluator(dataset_name)
        if evaluator_type == "cityscapes_panoptic_seg":
            if cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON:
                assert (
                    torch.cuda.device_count() > comm.get_rank()
                ), "CityscapesEvaluator currently do not work with multiple machines."
                evaluator_list.append(CityscapesSemSegEvaluator(dataset_name))
            if cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON:
                assert (
                    torch.cuda.device_count() > comm.get_rank()
                ), "CityscapesEvaluator currently do not work with multiple machines."
                evaluator_list.append(CityscapesInstanceEvaluator(dataset_name))
        # ADE20K
        if evaluator_type == "ade20k_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON:
            evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder))
        # LVIS
        if evaluator_type == "lvis":
            return LVISEvaluator(dataset_name, output_dir=output_folder)
        if len(evaluator_list) == 0:
            raise NotImplementedError(
                "no Evaluator for the dataset {} with the type {}".format(
                    dataset_name, evaluator_type
                )
            )
        elif len(evaluator_list) == 1:
            return evaluator_list[0]
        return DatasetEvaluators(evaluator_list)

    @classmethod
    def build_train_loader(cls, cfg):
        # Semantic segmentation dataset mapper
        if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic":
            mapper = MaskFormerSemanticDatasetMapper(cfg, True)
            return build_detection_train_loader(cfg, mapper=mapper)
        # Panoptic segmentation dataset mapper
        elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_panoptic":
            mapper = MaskFormerPanopticDatasetMapper(cfg, True)
            return build_detection_train_loader(cfg, mapper=mapper)
        # Instance segmentation dataset mapper
        elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_instance":
mapper = MaskFormerInstanceDatasetMapper(cfg, True)
3
2023-11-29 17:15:46+00:00
16k
opisaac9001/TTS-With-ooba-and-voice
TTS/tts/models/tacotron.py
[ { "identifier": "CapacitronVAE", "path": "TTS/tts/layers/tacotron/capacitron_layers.py", "snippet": "class CapacitronVAE(nn.Module):\n \"\"\"Effective Use of Variational Embedding Capacity for prosody transfer.\n\n See https://arxiv.org/abs/1906.03402\"\"\"\n\n def __init__(\n self,\n ...
from typing import Dict, List, Tuple, Union
from torch import nn
from torch.cuda.amp.autocast_mode import autocast
from trainer.trainer_utils import get_optimizer, get_scheduler
from TTS.tts.layers.tacotron.capacitron_layers import CapacitronVAE
from TTS.tts.layers.tacotron.gst_layers import GST
from TTS.tts.layers.tacotron.tacotron import Decoder, Encoder, PostCBHG
from TTS.tts.models.base_tacotron import BaseTacotron
from TTS.tts.utils.measures import alignment_diagonal_score
from TTS.tts.utils.speakers import SpeakerManager
from TTS.tts.utils.text.tokenizer import TTSTokenizer
from TTS.tts.utils.visual import plot_alignment, plot_spectrogram
from TTS.utils.capacitron_optimizer import CapacitronOptimizer
from TTS.utils.audio import AudioProcessor
import torch
14,140
""" text_input = batch["text_input"] text_lengths = batch["text_lengths"] mel_input = batch["mel_input"] mel_lengths = batch["mel_lengths"] linear_input = batch["linear_input"] stop_targets = batch["stop_targets"] stop_target_lengths = batch["stop_target_lengths"] speaker_ids = batch["speaker_ids"] d_vectors = batch["d_vectors"] aux_input = {"speaker_ids": speaker_ids, "d_vectors": d_vectors} outputs = self.forward(text_input, text_lengths, mel_input, mel_lengths, aux_input) # set the [alignment] lengths wrt reduction factor for guided attention if mel_lengths.max() % self.decoder.r != 0: alignment_lengths = ( mel_lengths + (self.decoder.r - (mel_lengths.max() % self.decoder.r)) ) // self.decoder.r else: alignment_lengths = mel_lengths // self.decoder.r # compute loss with autocast(enabled=False): # use float32 for the criterion loss_dict = criterion( outputs["model_outputs"].float(), outputs["decoder_outputs"].float(), mel_input.float(), linear_input.float(), outputs["stop_tokens"].float(), stop_targets.float(), stop_target_lengths, outputs["capacitron_vae_outputs"] if self.capacitron_vae else None, mel_lengths, None if outputs["decoder_outputs_backward"] is None else outputs["decoder_outputs_backward"].float(), outputs["alignments"].float(), alignment_lengths, None if outputs["alignments_backward"] is None else outputs["alignments_backward"].float(), text_lengths, ) # compute alignment error (the lower the better ) align_error = 1 - alignment_diagonal_score(outputs["alignments"]) loss_dict["align_error"] = align_error return outputs, loss_dict def get_optimizer(self) -> List: if self.use_capacitron_vae: return CapacitronOptimizer(self.config, self.named_parameters()) return get_optimizer(self.config.optimizer, self.config.optimizer_params, self.config.lr, self) def get_scheduler(self, optimizer: object): opt = optimizer.primary_optimizer if self.use_capacitron_vae else optimizer return get_scheduler(self.config.lr_scheduler, self.config.lr_scheduler_params, opt) def before_gradient_clipping(self): if self.use_capacitron_vae: # Capacitron model specific gradient clipping model_params_to_clip = [] for name, param in self.named_parameters(): if param.requires_grad: if name != "capacitron_vae_layer.beta": model_params_to_clip.append(param) torch.nn.utils.clip_grad_norm_(model_params_to_clip, self.capacitron_vae.capacitron_grad_clip) def _create_logs(self, batch, outputs, ap): postnet_outputs = outputs["model_outputs"] decoder_outputs = outputs["decoder_outputs"] alignments = outputs["alignments"] alignments_backward = outputs["alignments_backward"] mel_input = batch["mel_input"] linear_input = batch["linear_input"] pred_linear_spec = postnet_outputs[0].data.cpu().numpy() pred_mel_spec = decoder_outputs[0].data.cpu().numpy() gt_linear_spec = linear_input[0].data.cpu().numpy() gt_mel_spec = mel_input[0].data.cpu().numpy() align_img = alignments[0].data.cpu().numpy() figures = { "pred_linear_spec": plot_spectrogram(pred_linear_spec, ap, output_fig=False), "real_linear_spec": plot_spectrogram(gt_linear_spec, ap, output_fig=False), "pred_mel_spec": plot_spectrogram(pred_mel_spec, ap, output_fig=False), "real_mel_spec": plot_spectrogram(gt_mel_spec, ap, output_fig=False), "alignment": plot_alignment(align_img, output_fig=False), } if self.bidirectional_decoder or self.double_decoder_consistency: figures["alignment_backward"] = plot_alignment(alignments_backward[0].data.cpu().numpy(), output_fig=False) # Sample audio audio = ap.inv_spectrogram(pred_linear_spec.T) return figures, {"audio": audio} 
def train_log( self, batch: dict, outputs: dict, logger: "Logger", assets: dict, steps: int ) -> None: # pylint: disable=no-self-use figures, audios = self._create_logs(batch, outputs, self.ap) logger.train_figures(steps, figures) logger.train_audios(steps, audios, self.ap.sample_rate) def eval_step(self, batch: dict, criterion: nn.Module): return self.train_step(batch, criterion) def eval_log(self, batch: dict, outputs: dict, logger: "Logger", assets: dict, steps: int) -> None: figures, audios = self._create_logs(batch, outputs, self.ap) logger.eval_figures(steps, figures) logger.eval_audios(steps, audios, self.ap.sample_rate) @staticmethod def init_from_config(config: "TacotronConfig", samples: Union[List[List], List[Dict]] = None): """Initiate model from config Args: config (TacotronConfig): Model config. samples (Union[List[List], List[Dict]]): Training samples to parse speaker ids for training. Defaults to None. """ ap = AudioProcessor.init_from_config(config)
# coding: utf-8


class Tacotron(BaseTacotron):
    """Tacotron as in https://arxiv.org/abs/1703.10135
    It's an autoregressive encoder-attention-decoder-postnet architecture.
    Check `TacotronConfig` for the arguments.

    Args:
        config (TacotronConfig): Configuration for the Tacotron model.
        speaker_manager (SpeakerManager): Speaker manager to handle multi-speaker settings. Only use if the model
            is a multi-speaker model. Defaults to None.
    """

    def __init__(
        self,
        config: "TacotronConfig",
        ap: "AudioProcessor" = None,
        tokenizer: "TTSTokenizer" = None,
        speaker_manager: SpeakerManager = None,
    ):
        super().__init__(config, ap, tokenizer, speaker_manager)

        # pass all config fields to `self`
        # for fewer code change
        for key in config:
            setattr(self, key, config[key])

        # set speaker embedding channel size for determining `in_channels` for the connected layers.
        # `init_multispeaker` needs to be called once more in training to initialize the speaker embedding layer based
        # on the number of speakers infered from the dataset.
        if self.use_speaker_embedding or self.use_d_vector_file:
            self.init_multispeaker(config)
            self.decoder_in_features += self.embedded_speaker_dim  # add speaker embedding dim

        if self.use_gst:
            self.decoder_in_features += self.gst.gst_embedding_dim

        if self.use_capacitron_vae:
            self.decoder_in_features += self.capacitron_vae.capacitron_VAE_embedding_dim

        # embedding layer
        self.embedding = nn.Embedding(self.num_chars, 256, padding_idx=0)
        self.embedding.weight.data.normal_(0, 0.3)

        # base model layers
        self.encoder = Encoder(self.encoder_in_features)
        self.decoder = Decoder(
            self.decoder_in_features, self.decoder_output_dim, self.r, self.memory_size, self.attention_type,
            self.windowing, self.attention_norm, self.prenet_type, self.prenet_dropout, self.use_forward_attn,
            self.transition_agent, self.forward_attn_mask, self.location_attn, self.attention_heads,
            self.separate_stopnet, self.max_decoder_steps,
        )
        self.postnet = PostCBHG(self.decoder_output_dim)
        self.last_linear = nn.Linear(self.postnet.cbhg.gru_features * 2, self.out_channels)

        # setup prenet dropout
        self.decoder.prenet.dropout_at_inference = self.prenet_dropout_at_inference

        # global style token layers
        if self.gst and self.use_gst:
            self.gst_layer = GST(
                num_mel=self.decoder_output_dim,
                num_heads=self.gst.gst_num_heads,
                num_style_tokens=self.gst.gst_num_style_tokens,
                gst_embedding_dim=self.gst.gst_embedding_dim,
            )

        # Capacitron layers
        if self.capacitron_vae and self.use_capacitron_vae:
            self.capacitron_vae_layer = CapacitronVAE(
                num_mel=self.decoder_output_dim,
                encoder_output_dim=self.encoder_in_features,
                capacitron_VAE_embedding_dim=self.capacitron_vae.capacitron_VAE_embedding_dim,
                speaker_embedding_dim=self.embedded_speaker_dim
                if self.use_speaker_embedding and self.capacitron_vae.capacitron_use_speaker_embedding
                else None,
                text_summary_embedding_dim=self.capacitron_vae.capacitron_text_summary_embedding_dim
                if self.capacitron_vae.capacitron_use_text_summary_embeddings
                else None,
            )

        # backward pass decoder
        if self.bidirectional_decoder:
            self._init_backward_decoder()

        # setup DDC
        if self.double_decoder_consistency:
            self.coarse_decoder = Decoder(
                self.decoder_in_features, self.decoder_output_dim, self.ddc_r, self.memory_size,
                self.attention_type, self.windowing, self.attention_norm, self.prenet_type, self.prenet_dropout,
                self.use_forward_attn, self.transition_agent, self.forward_attn_mask, self.location_attn,
                self.attention_heads, self.separate_stopnet, self.max_decoder_steps,
            )

    def forward(  # pylint: disable=dangerous-default-value
        self, text, text_lengths, mel_specs=None, mel_lengths=None, aux_input={"speaker_ids": None, "d_vectors": None}
    ):
        """
        Shapes:
            text: [B, T_in]
            text_lengths: [B]
            mel_specs: [B, T_out, C]
            mel_lengths: [B]
            aux_input: 'speaker_ids': [B, 1] and 'd_vectors': [B, C]
        """
        aux_input = self._format_aux_input(aux_input)
        outputs = {"alignments_backward": None, "decoder_outputs_backward": None}
        inputs = self.embedding(text)
        input_mask, output_mask = self.compute_masks(text_lengths, mel_lengths)
        # B x T_in x encoder_in_features
        encoder_outputs = self.encoder(inputs)
        # sequence masking
        encoder_outputs = encoder_outputs * input_mask.unsqueeze(2).expand_as(encoder_outputs)
        # global style token
        if self.gst and self.use_gst:
            # B x gst_dim
            encoder_outputs = self.compute_gst(encoder_outputs, mel_specs)
        # speaker embedding
        if self.use_speaker_embedding or self.use_d_vector_file:
            if not self.use_d_vector_file:
                # B x 1 x speaker_embed_dim
                embedded_speakers = self.speaker_embedding(aux_input["speaker_ids"])[:, None]
            else:
                # B x 1 x speaker_embed_dim
                embedded_speakers = torch.unsqueeze(aux_input["d_vectors"], 1)
            encoder_outputs = self._concat_speaker_embedding(encoder_outputs, embedded_speakers)
        # Capacitron
        if self.capacitron_vae and self.use_capacitron_vae:
            # B x capacitron_VAE_embedding_dim
            encoder_outputs, *capacitron_vae_outputs = self.compute_capacitron_VAE_embedding(
                encoder_outputs,
                reference_mel_info=[mel_specs, mel_lengths],
                text_info=[inputs, text_lengths] if self.capacitron_vae.capacitron_use_text_summary_embeddings else None,
                speaker_embedding=embedded_speakers if self.capacitron_vae.capacitron_use_speaker_embedding else None,
            )
        else:
            capacitron_vae_outputs = None
        # decoder_outputs: B x decoder_in_features x T_out
        # alignments: B x T_in x encoder_in_features
        # stop_tokens: B x T_in
        decoder_outputs, alignments, stop_tokens = self.decoder(encoder_outputs, mel_specs, input_mask)
        # sequence masking
        if output_mask is not None:
            decoder_outputs = decoder_outputs * output_mask.unsqueeze(1).expand_as(decoder_outputs)
        # B x T_out x decoder_in_features
        postnet_outputs = self.postnet(decoder_outputs)
        # sequence masking
        if output_mask is not None:
            postnet_outputs = postnet_outputs * output_mask.unsqueeze(2).expand_as(postnet_outputs)
        # B x T_out x posnet_dim
        postnet_outputs = self.last_linear(postnet_outputs)
        # B x T_out x decoder_in_features
        decoder_outputs = decoder_outputs.transpose(1, 2).contiguous()
        if self.bidirectional_decoder:
            decoder_outputs_backward, alignments_backward = self._backward_pass(mel_specs, encoder_outputs, input_mask)
            outputs["alignments_backward"] = alignments_backward
            outputs["decoder_outputs_backward"] = decoder_outputs_backward
        if self.double_decoder_consistency:
            decoder_outputs_backward, alignments_backward = self._coarse_decoder_pass(
                mel_specs, encoder_outputs, alignments, input_mask
            )
            outputs["alignments_backward"] = alignments_backward
            outputs["decoder_outputs_backward"] = decoder_outputs_backward
        outputs.update(
            {
                "model_outputs": postnet_outputs,
                "decoder_outputs": decoder_outputs,
                "alignments": alignments,
                "stop_tokens": stop_tokens,
                "capacitron_vae_outputs": capacitron_vae_outputs,
            }
        )
        return outputs

    @torch.no_grad()
    def inference(self, text_input, aux_input=None):
        aux_input = self._format_aux_input(aux_input)
        inputs = self.embedding(text_input)
        encoder_outputs = self.encoder(inputs)
        if self.gst and self.use_gst:
            # B x gst_dim
            encoder_outputs = self.compute_gst(encoder_outputs, aux_input["style_mel"], aux_input["d_vectors"])
        if self.capacitron_vae and self.use_capacitron_vae:
            if aux_input["style_text"] is not None:
                style_text_embedding = self.embedding(aux_input["style_text"])
                style_text_length = torch.tensor([style_text_embedding.size(1)], dtype=torch.int64).to(
                    encoder_outputs.device
                )  # pylint: disable=not-callable
            reference_mel_length = (
                torch.tensor([aux_input["style_mel"].size(1)], dtype=torch.int64).to(encoder_outputs.device)
                if aux_input["style_mel"] is not None
                else None
            )  # pylint: disable=not-callable
            # B x capacitron_VAE_embedding_dim
            encoder_outputs, *_ = self.compute_capacitron_VAE_embedding(
                encoder_outputs,
                reference_mel_info=[aux_input["style_mel"], reference_mel_length]
                if aux_input["style_mel"] is not None
                else None,
                text_info=[style_text_embedding, style_text_length]
                if aux_input["style_text"] is not None
                else None,
                speaker_embedding=aux_input["d_vectors"]
                if self.capacitron_vae.capacitron_use_speaker_embedding
                else None,
            )
        if self.num_speakers > 1:
            if not self.use_d_vector_file:
                # B x 1 x speaker_embed_dim
                embedded_speakers = self.speaker_embedding(aux_input["speaker_ids"])
                # reshape embedded_speakers
                if embedded_speakers.ndim == 1:
                    embedded_speakers = embedded_speakers[None, None, :]
                elif embedded_speakers.ndim == 2:
                    embedded_speakers = embedded_speakers[None, :]
            else:
                # B x 1 x speaker_embed_dim
                embedded_speakers = torch.unsqueeze(aux_input["d_vectors"], 1)
            encoder_outputs = self._concat_speaker_embedding(encoder_outputs, embedded_speakers)
        decoder_outputs, alignments, stop_tokens = self.decoder.inference(encoder_outputs)
        postnet_outputs = self.postnet(decoder_outputs)
        postnet_outputs = self.last_linear(postnet_outputs)
        decoder_outputs = decoder_outputs.transpose(1, 2)
        outputs = {
            "model_outputs": postnet_outputs,
            "decoder_outputs": decoder_outputs,
            "alignments": alignments,
            "stop_tokens": stop_tokens,
        }
        return outputs

    def before_backward_pass(self, loss_dict, optimizer) -> None:
        # Extracting custom training specific operations for capacitron
        # from the trainer
        if self.use_capacitron_vae:
            loss_dict["capacitron_vae_beta_loss"].backward()
            optimizer.first_step()

    def train_step(self, batch: Dict, criterion: torch.nn.Module) -> Tuple[Dict, Dict]:
        """Perform a single training step by fetching the right set of samples from the batch.

        Args:
            batch ([Dict]): A dictionary of input tensors.
            criterion ([torch.nn.Module]): Callable criterion to compute model loss.
        """
        text_input = batch["text_input"]
        text_lengths = batch["text_lengths"]
        mel_input = batch["mel_input"]
        mel_lengths = batch["mel_lengths"]
        linear_input = batch["linear_input"]
        stop_targets = batch["stop_targets"]
        stop_target_lengths = batch["stop_target_lengths"]
        speaker_ids = batch["speaker_ids"]
        d_vectors = batch["d_vectors"]

        aux_input = {"speaker_ids": speaker_ids, "d_vectors": d_vectors}
        outputs = self.forward(text_input, text_lengths, mel_input, mel_lengths, aux_input)

        # set the [alignment] lengths wrt reduction factor for guided attention
        if mel_lengths.max() % self.decoder.r != 0:
            alignment_lengths = (
                mel_lengths + (self.decoder.r - (mel_lengths.max() % self.decoder.r))
            ) // self.decoder.r
        else:
            alignment_lengths = mel_lengths // self.decoder.r

        # compute loss
        with autocast(enabled=False):  # use float32 for the criterion
            loss_dict = criterion(
                outputs["model_outputs"].float(),
                outputs["decoder_outputs"].float(),
                mel_input.float(),
                linear_input.float(),
                outputs["stop_tokens"].float(),
                stop_targets.float(),
                stop_target_lengths,
                outputs["capacitron_vae_outputs"] if self.capacitron_vae else None,
                mel_lengths,
                None if outputs["decoder_outputs_backward"] is None else outputs["decoder_outputs_backward"].float(),
                outputs["alignments"].float(),
                alignment_lengths,
                None if outputs["alignments_backward"] is None else outputs["alignments_backward"].float(),
                text_lengths,
            )

        # compute alignment error (the lower the better )
        align_error = 1 - alignment_diagonal_score(outputs["alignments"])
        loss_dict["align_error"] = align_error
        return outputs, loss_dict

    def get_optimizer(self) -> List:
        if self.use_capacitron_vae:
            return CapacitronOptimizer(self.config, self.named_parameters())
        return get_optimizer(self.config.optimizer, self.config.optimizer_params, self.config.lr, self)

    def get_scheduler(self, optimizer: object):
        opt = optimizer.primary_optimizer if self.use_capacitron_vae else optimizer
        return get_scheduler(self.config.lr_scheduler, self.config.lr_scheduler_params, opt)

    def before_gradient_clipping(self):
        if self.use_capacitron_vae:
            # Capacitron model specific gradient clipping
            model_params_to_clip = []
            for name, param in self.named_parameters():
                if param.requires_grad:
                    if name != "capacitron_vae_layer.beta":
                        model_params_to_clip.append(param)
            torch.nn.utils.clip_grad_norm_(model_params_to_clip, self.capacitron_vae.capacitron_grad_clip)

    def _create_logs(self, batch, outputs, ap):
        postnet_outputs = outputs["model_outputs"]
        decoder_outputs = outputs["decoder_outputs"]
        alignments = outputs["alignments"]
        alignments_backward = outputs["alignments_backward"]
        mel_input = batch["mel_input"]
        linear_input = batch["linear_input"]

        pred_linear_spec = postnet_outputs[0].data.cpu().numpy()
        pred_mel_spec = decoder_outputs[0].data.cpu().numpy()
        gt_linear_spec = linear_input[0].data.cpu().numpy()
        gt_mel_spec = mel_input[0].data.cpu().numpy()
        align_img = alignments[0].data.cpu().numpy()

        figures = {
            "pred_linear_spec": plot_spectrogram(pred_linear_spec, ap, output_fig=False),
            "real_linear_spec": plot_spectrogram(gt_linear_spec, ap, output_fig=False),
            "pred_mel_spec": plot_spectrogram(pred_mel_spec, ap, output_fig=False),
            "real_mel_spec": plot_spectrogram(gt_mel_spec, ap, output_fig=False),
            "alignment": plot_alignment(align_img, output_fig=False),
        }

        if self.bidirectional_decoder or self.double_decoder_consistency:
            figures["alignment_backward"] = plot_alignment(alignments_backward[0].data.cpu().numpy(), output_fig=False)

        # Sample audio
        audio = ap.inv_spectrogram(pred_linear_spec.T)
        return figures, {"audio": audio}

    def train_log(
        self, batch: dict, outputs: dict, logger: "Logger", assets: dict, steps: int
    ) -> None:  # pylint: disable=no-self-use
        figures, audios = self._create_logs(batch, outputs, self.ap)
        logger.train_figures(steps, figures)
        logger.train_audios(steps, audios, self.ap.sample_rate)

    def eval_step(self, batch: dict, criterion: nn.Module):
        return self.train_step(batch, criterion)

    def eval_log(self, batch: dict, outputs: dict, logger: "Logger", assets: dict, steps: int) -> None:
        figures, audios = self._create_logs(batch, outputs, self.ap)
        logger.eval_figures(steps, figures)
        logger.eval_audios(steps, audios, self.ap.sample_rate)

    @staticmethod
    def init_from_config(config: "TacotronConfig", samples: Union[List[List], List[Dict]] = None):
        """Initiate model from config

        Args:
            config (TacotronConfig): Model config.
            samples (Union[List[List], List[Dict]]): Training samples to parse speaker ids for training.
                Defaults to None.
        """
        ap = AudioProcessor.init_from_config(config)
tokenizer, new_config = TTSTokenizer.init_from_config(config)
8
2023-11-29 08:15:06+00:00
16k
wenquanlu/HandRefiner
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n ...
import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import itertools
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from omegaconf import ListConfig
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler
12,479
assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None):
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if reset_ema: assert exists(ckpt_path) if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) if reset_ema: assert self.use_ema print(f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) else: self.register_buffer('logvar', logvar) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
""" mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None):
noise = default(noise, lambda: torch.randn_like(x_start))
2
2023-11-24 10:19:23+00:00
16k
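The target next_line in the ddpm.py record above is the opening statement of q_sample, which samples x_t directly from x_0 in closed form, x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise, using the sqrt_alphas_cumprod / sqrt_one_minus_alphas_cumprod buffers registered in register_schedule. A minimal self-contained sketch under those assumptions; extract is a hand-rolled stand-in for the record's extract_into_tensor helper, and the linear beta schedule values are illustrative only:

import torch

def extract(a: torch.Tensor, t: torch.Tensor, x_shape) -> torch.Tensor:
    # Gather the per-timestep coefficient for each batch element and reshape
    # so it broadcasts over the remaining dimensions of x.
    out = a.gather(-1, t)
    return out.reshape(t.shape[0], *((1,) * (len(x_shape) - 1)))

def q_sample(x_start, t, sqrt_alphas_cumprod, sqrt_one_minus_alphas_cumprod, noise=None):
    # Same default as the record's next_line: fresh Gaussian noise if none given.
    noise = torch.randn_like(x_start) if noise is None else noise
    return (extract(sqrt_alphas_cumprod, t, x_start.shape) * x_start
            + extract(sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)

T = 1000
betas = torch.linspace(1e-4, 2e-2, T)          # illustrative linear schedule
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
x0 = torch.randn(2, 3, 8, 8)
t = torch.tensor([10, 500])
xt = q_sample(x0, t, alphas_cumprod.sqrt(), (1 - alphas_cumprod).sqrt())
print(xt.shape)  # torch.Size([2, 3, 8, 8])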
eth-sri/language-model-arithmetic
src/model_arithmetic/model_arithmetic.py
[ { "identifier": "load_model", "path": "src/model_arithmetic/basic_model_loader.py", "snippet": "def load_model(dir_or_model, classification=False, token_classification=False, return_tokenizer=False, dtype=torch.bfloat16, load_dtype=True, \n rl=False, peft_config=None):\n \"\"\"\n Th...
from transformers import PreTrainedModel from .basic_model_loader import load_model, load_tokenizer from .utils import get_max_length, ENABLE_LOGGING, log from collections import namedtuple from transformers import top_k_top_p_filtering from loguru import logger from .operators import Operator from .monitor import Monitor from .runnable_operators import RunnableOperator, PromptedLLM from .input import TokenizedInput from .lm_eval_compatibility import Compatibility import json import numpy as np import torch import os import time import random
11,440
class ModelArithmetic(PreTrainedModel): """ Main class for prompt arithmetic. Handles the generation of text based on the formula. """ SAVE_FILE = "prompt_arithmetic.json" _supports_sdpa = True def __init__(self, formula : Operator, default_model : str = None, dtype=torch.bfloat16, intermediate_argmax : bool = False, epsilon = 1e-12, retroactive_operators = [], calculate_statistics=True, needs_input_tokens_lm_eval=False, lm_eval_task=None, tokenizer=None): """Initializes the prompt arithmetic model. Args: formula (Operator): The formula for which generations need to be made. default_model (str, optional): Default model for RunnableOperators that don't have a model associated with them. Defaults to None. dtype (torch.dtype, optional): Dtype of the models to load by default. Defaults to torch.bfloat16. intermediate_argmax (bool, optional): Something unimportant that was tried out, but now deprecated. Defaults to False. epsilon (float, optional): Just some small value. Defaults to 1e-12. retroactive_operators (list, optional): The retroactive operators that need to be applied. Defaults to []. calculate_statistics (bool, optional): Whether or not to calculate some statistics, can be a tad bit expensive. Defaults to True. needs_input_tokens_lm_eval (bool, optional): Whether or not lm eval is used and whether or not the task needs the input tokens. Defaults to False. Only set to true for an lm eval task. lm_eval_task (str, optional): Name of the lm eval task. Defaults to None. tokenizer (transformers.tokenization_utils_base.PreTrainedTokenizerBase, optional): Tokenizer to use. Defaults to None. """ self.formula = formula.clone() self.default_model = default_model self.loaded_models = dict() self.model_prediction_history = [] # keeps track of the RunnableOperators predictions for each token (that hasn't finished computing) self.logprobs_history = [] # keeps track of the current probability distribution for which each token has been drawn self.model_last_token_prediction = [] # keeps track of the last token that has been predicted for each RunnableOperator self.output_type = namedtuple("ModelArithmeticOutput", ["logits", "logprobs_per_model"]) self.intermediate_argmax = intermediate_argmax self.retroactive_operators = retroactive_operators self.calculate_statistics = calculate_statistics self.runnable_operators = [] for runnable_operator in self.formula.runnable_operators(): if not any([runnable_operator.same_operator(output) for output in self.runnable_operators]): self.runnable_operators.append(runnable_operator) # sort the prompts by speculative factor, putting the one with highest speculative factor first # => run model with highest speculative factor first, since otherwise the computation might be wasted for the first ones # however, we first need to sort by run_priority and then within that by speculative factor self.runnable_operators = sorted(self.runnable_operators, key=lambda runnable_operator: (runnable_operator.run_priority, runnable_operator.speculative_factor), reverse=True) self.load_all_models(dtype=dtype) if self.default_model not in self.loaded_models: for runnable_operator in self.runnable_operators: if isinstance(runnable_operator, PromptedLLM) and runnable_operator.model is not None: self.default_model = runnable_operator.model break if self.default_model is None: raise ValueError("Default model must be specified if not specified in an llm prompt") self.config = self.loaded_models[str(self.default_model)].config if tokenizer is None: self.tokenizer = 
load_tokenizer(self.default_model) else: self.tokenizer = tokenizer self.init_runnable_operators() self.model_input_tokens = {
class ModelArithmetic(PreTrainedModel): """ Main class for prompt arithmetic. Handles the generation of text based on the formula. """ SAVE_FILE = "prompt_arithmetic.json" _supports_sdpa = True def __init__(self, formula : Operator, default_model : str = None, dtype=torch.bfloat16, intermediate_argmax : bool = False, epsilon = 1e-12, retroactive_operators = [], calculate_statistics=True, needs_input_tokens_lm_eval=False, lm_eval_task=None, tokenizer=None): """Initializes the prompt arithmetic model. Args: formula (Operator): The formula for which generations need to be made. default_model (str, optional): Default model for RunnableOperators that don't have a model associated with them. Defaults to None. dtype (torch.dtype, optional): Dtype of the models to load by default. Defaults to torch.bfloat16. intermediate_argmax (bool, optional): Something unimportant that was tried out, but now deprecated. Defaults to False. epsilon (float, optional): Just some small value. Defaults to 1e-12. retroactive_operators (list, optional): The retroactive operators that need to be applied. Defaults to []. calculate_statistics (bool, optional): Whether or not to calculate some statistics, can be a tad bit expensive. Defaults to True. needs_input_tokens_lm_eval (bool, optional): Whether or not lm eval is used and whether or not the task needs the input tokens. Defaults to False. Only set to true for an lm eval task. lm_eval_task (str, optional): Name of the lm eval task. Defaults to None. tokenizer (transformers.tokenization_utils_base.PreTrainedTokenizerBase, optional): Tokenizer to use. Defaults to None. """ self.formula = formula.clone() self.default_model = default_model self.loaded_models = dict() self.model_prediction_history = [] # keeps track of the RunnableOperators predictions for each token (that hasn't finished computing) self.logprobs_history = [] # keeps track of the current probability distribution for which each token has been drawn self.model_last_token_prediction = [] # keeps track of the last token that has been predicted for each RunnableOperator self.output_type = namedtuple("ModelArithmeticOutput", ["logits", "logprobs_per_model"]) self.intermediate_argmax = intermediate_argmax self.retroactive_operators = retroactive_operators self.calculate_statistics = calculate_statistics self.runnable_operators = [] for runnable_operator in self.formula.runnable_operators(): if not any([runnable_operator.same_operator(output) for output in self.runnable_operators]): self.runnable_operators.append(runnable_operator) # sort the prompts by speculative factor, putting the one with highest speculative factor first # => run model with highest speculative factor first, since otherwise the computation might be wasted for the first ones # however, we first need to sort by run_priority and then within that by speculative factor self.runnable_operators = sorted(self.runnable_operators, key=lambda runnable_operator: (runnable_operator.run_priority, runnable_operator.speculative_factor), reverse=True) self.load_all_models(dtype=dtype) if self.default_model not in self.loaded_models: for runnable_operator in self.runnable_operators: if isinstance(runnable_operator, PromptedLLM) and runnable_operator.model is not None: self.default_model = runnable_operator.model break if self.default_model is None: raise ValueError("Default model must be specified if not specified in an llm prompt") self.config = self.loaded_models[str(self.default_model)].config if tokenizer is None: self.tokenizer = 
load_tokenizer(self.default_model) else: self.tokenizer = tokenizer self.init_runnable_operators() self.model_input_tokens = {
runnable_operator.id(): TokenizedInput(runnable_operator,
9
2023-11-21 20:01:08+00:00
16k
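The ModelArithmetic constructor in the record above first dedupes runnable operators, then orders them by run_priority and, within that, by speculative_factor, both descending, so the highest-priority and most speculative operator is evaluated first and less computation is wasted. A self-contained sketch of that ordering with stand-in objects; the Op dataclass is purely illustrative and not part of the repo:

from dataclasses import dataclass

@dataclass
class Op:
    name: str
    run_priority: int
    speculative_factor: int

ops = [Op("a", 0, 4), Op("b", 1, 1), Op("c", 0, 8)]
# Same key as the record: tuple-compare (run_priority, speculative_factor), reverse=True.
ordered = sorted(ops, key=lambda op: (op.run_priority, op.speculative_factor), reverse=True)
print([op.name for op in ordered])  # ['b', 'c', 'a'] -- priority wins, factor breaks ties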
huang-yh/SelfOcc
model/encoder/tpvformer/tpvformer_encoder.py
[ { "identifier": "BaseEncoder", "path": "model/encoder/base_encoder.py", "snippet": "class BaseEncoder(BaseModule):\n \"\"\"Further encode 3D representations.\n image backbone -> neck -> lifter -> encoder -> segmentor\n \"\"\"\n\n def __init__(self, init_cfg=None, **kwargs):\n super()....
from mmseg.registry import MODELS from mmcv.cnn.bricks.transformer import build_positional_encoding, build_transformer_layer from mmcv.ops.multi_scale_deform_attn import MultiScaleDeformableAttention from mmengine.model import ModuleList from torch.nn.init import normal_ from mmengine.logging import MMLogger from ..base_encoder import BaseEncoder from ..bevformer.utils import point_sampling from .utils import get_cross_view_ref_points from ..bevformer.mappings import GridMeterMapping from ..bevformer.attention import BEVCrossAttention, BEVDeformableAttention from .attention import TPVCrossAttention, CrossViewHybridAttention from .modules import CameraAwareSE import torch.nn as nn, torch, copy
11,884
self.camera_aware = camera_aware if camera_aware: if camera_aware_mid_channels is None: camera_aware_mid_channels = embed_dims self.camera_se_net = CameraAwareSE( embed_dims, camera_aware_mid_channels, embed_dims) self.mapping = GridMeterMapping( # bev_inner, # bev_outer, # range_inner, # range_outer, # nonlinear_mode, # z_inner, # z_outer, # z_ranges **mapping_args) size_h = self.mapping.size_h size_w = self.mapping.size_w size_d = self.mapping.size_d hw_grid = torch.stack( [torch.arange(size_h, dtype=torch.float).unsqueeze(-1).expand(-1, size_w), torch.arange(size_w, dtype=torch.float).unsqueeze(0).expand(size_h, -1), torch.zeros(size_h, size_w)], dim=-1) hw_meter = self.mapping.grid2meter(hw_grid)[..., [0, 1]] zh_grid = torch.stack( [torch.arange(size_h, dtype=torch.float).unsqueeze(0).expand(size_d, -1), torch.zeros(size_d, size_h), torch.arange(size_d, dtype=torch.float).unsqueeze(-1).expand(-1, size_h)], dim=-1) zh_meter = self.mapping.grid2meter(zh_grid)[..., [1, 2]] wz_grid = torch.stack( [torch.zeros(size_w, size_d), torch.arange(size_w, dtype=torch.float).unsqueeze(-1).expand(-1, size_d), torch.arange(size_d, dtype=torch.float).unsqueeze(0).expand(size_w, -1)], dim=-1) wz_meter = self.mapping.grid2meter(wz_grid)[..., [0, 2]] positional_encoding.update({'tpv_meters': [hw_meter, zh_meter, wz_meter]}) self.positional_encoding = build_positional_encoding(positional_encoding) self.tpv_size = [size_h, size_w, size_d] # transformer layers if isinstance(transformerlayers, dict): transformerlayers = [ copy.deepcopy(transformerlayers) for _ in range(num_layers)] else: assert isinstance(transformerlayers, list) and \ len(transformerlayers) == num_layers self.num_layers = num_layers self.layers = ModuleList() for i in range(num_layers): self.layers.append(build_transformer_layer(transformerlayers[i])) self.pre_norm = self.layers[0].pre_norm logger.info('use pre_norm: ' + str(self.pre_norm)) # other learnable embeddings self.level_embeds = nn.Parameter( torch.randn(self.num_feature_levels, self.embed_dims)) self.cams_embeds = nn.Parameter( torch.randn(self.num_cams, self.embed_dims)) # prepare reference points used in image cross-attention and cross-view hybrid-attention self.num_points_cross = num_points_cross self.num_points_self = num_points_self uniform_d = torch.linspace(0, size_d - 1, num_points_cross[2]) hw_3d_grid = torch.cat([ hw_grid[..., [0, 1]].unsqueeze(2).expand(-1, -1, num_points_cross[2], -1), uniform_d.reshape(1, 1, -1, 1).expand(size_h, size_w, -1, -1)], dim=-1) ref_3d_hw = self.mapping.grid2meter(hw_3d_grid) # H, W, P0, 3 uniform_w = torch.linspace(0, size_w - 1, num_points_cross[1]) zh_3d_grid = torch.cat([ zh_grid[..., :1].unsqueeze(2).expand(-1, -1, num_points_cross[1], -1), uniform_w.reshape(1, 1, -1, 1).expand(size_d, size_h, -1, -1), zh_grid[..., 2:].unsqueeze(2).expand(-1, -1, num_points_cross[1], -1) ], dim=-1) ref_3d_zh = self.mapping.grid2meter(zh_3d_grid) # Z, H, P1, 3 uniform_h = torch.linspace(0, size_h - 1, num_points_cross[0]) wz_3d_grid = torch.cat([ uniform_h.reshape(1, 1, -1, 1).expand(size_w, size_d, -1, -1), wz_grid[..., [1, 2]].unsqueeze(2).expand(-1, -1, num_points_cross[0], -1) ], dim=-1) ref_3d_wz = self.mapping.grid2meter(wz_3d_grid) # W, Z, P2, 3 self.register_buffer('ref_3d_hw', ref_3d_hw.flatten(0, 1).transpose(0, 1), False) self.register_buffer('ref_3d_zh', ref_3d_zh.flatten(0, 1).transpose(0, 1), False) self.register_buffer('ref_3d_wz', ref_3d_wz.flatten(0, 1).transpose(0, 1), False) cross_view_ref_points = get_cross_view_ref_points(size_h, 
size_w, size_d, num_points_self) self.register_buffer('cross_view_ref_points', cross_view_ref_points, False) # hw_grid_normed = hw_grid[..., [0, 1]].clone() # hw_grid_normed[..., 0] = hw_grid_normed[..., 0] / (size_h - 1) # hw_grid_normed[..., 1] = hw_grid_normed[..., 1] / (size_w - 1) # zh_grid_normed = zh_grid[..., [2, 0]].clone() # zh_grid_normed[..., 0] = zh_grid_normed[..., 0] / (size_d - 1) # zh_grid_normed[..., 1] = zh_grid_normed[..., 1] / (size_h - 1) # wz_grid_normed = wz_grid[..., [1, 2]].clone() # wz_grid_normed[..., 0] = wz_grid_normed[..., 0] / (size_w - 1) # wz_grid_normed[..., 1] = wz_grid_normed[..., 1] / (size_d - 1) # self.register_buffer('ref_2d_hw', hw_grid_normed, False) # H, W, 2 # self.register_buffer('ref_2d_zh', zh_grid_normed, False) # H, W, 2 # self.register_buffer('ref_2d_wz', wz_grid_normed, False) # H, W, 2 def init_weights(self): """Initialize the transformer weights.""" for p in self.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p) for m in self.modules():
logger = MMLogger.get_instance('selfocc') @MODELS.register_module() class TPVFormerEncoder(BaseEncoder): def __init__( self, mapping_args: dict, # bev_inner=128, # bev_outer=32, # range_inner=51.2, # range_outer=51.2, # nonlinear_mode='linear_upscale', # z_inner=20, # z_outer=10, # z_ranges=[-5.0, 3.0, 11.0], embed_dims=128, num_cams=6, num_feature_levels=4, positional_encoding=None, num_points_cross=[64, 64, 8], num_points_self=[16, 16, 16], transformerlayers=None, num_layers=None, camera_aware=False, camera_aware_mid_channels=None, init_cfg=None): super().__init__(init_cfg) # self.bev_inner = bev_inner # self.bev_outer = bev_outer # self.range_inner = range_inner # self.range_outer = range_outer # assert nonlinear_mode == 'linear_upscale' # TODO # self.nonlinear_mode = nonlinear_mode # self.z_inner = z_inner # self.z_outer = z_outer # self.z_ranges = z_ranges self.embed_dims = embed_dims self.num_feature_levels = num_feature_levels self.num_cams = num_cams self.camera_aware = camera_aware if camera_aware: if camera_aware_mid_channels is None: camera_aware_mid_channels = embed_dims self.camera_se_net = CameraAwareSE( embed_dims, camera_aware_mid_channels, embed_dims) self.mapping = GridMeterMapping( # bev_inner, # bev_outer, # range_inner, # range_outer, # nonlinear_mode, # z_inner, # z_outer, # z_ranges **mapping_args) size_h = self.mapping.size_h size_w = self.mapping.size_w size_d = self.mapping.size_d hw_grid = torch.stack( [torch.arange(size_h, dtype=torch.float).unsqueeze(-1).expand(-1, size_w), torch.arange(size_w, dtype=torch.float).unsqueeze(0).expand(size_h, -1), torch.zeros(size_h, size_w)], dim=-1) hw_meter = self.mapping.grid2meter(hw_grid)[..., [0, 1]] zh_grid = torch.stack( [torch.arange(size_h, dtype=torch.float).unsqueeze(0).expand(size_d, -1), torch.zeros(size_d, size_h), torch.arange(size_d, dtype=torch.float).unsqueeze(-1).expand(-1, size_h)], dim=-1) zh_meter = self.mapping.grid2meter(zh_grid)[..., [1, 2]] wz_grid = torch.stack( [torch.zeros(size_w, size_d), torch.arange(size_w, dtype=torch.float).unsqueeze(-1).expand(-1, size_d), torch.arange(size_d, dtype=torch.float).unsqueeze(0).expand(size_w, -1)], dim=-1) wz_meter = self.mapping.grid2meter(wz_grid)[..., [0, 2]] positional_encoding.update({'tpv_meters': [hw_meter, zh_meter, wz_meter]}) self.positional_encoding = build_positional_encoding(positional_encoding) self.tpv_size = [size_h, size_w, size_d] # transformer layers if isinstance(transformerlayers, dict): transformerlayers = [ copy.deepcopy(transformerlayers) for _ in range(num_layers)] else: assert isinstance(transformerlayers, list) and \ len(transformerlayers) == num_layers self.num_layers = num_layers self.layers = ModuleList() for i in range(num_layers): self.layers.append(build_transformer_layer(transformerlayers[i])) self.pre_norm = self.layers[0].pre_norm logger.info('use pre_norm: ' + str(self.pre_norm)) # other learnable embeddings self.level_embeds = nn.Parameter( torch.randn(self.num_feature_levels, self.embed_dims)) self.cams_embeds = nn.Parameter( torch.randn(self.num_cams, self.embed_dims)) # prepare reference points used in image cross-attention and cross-view hybrid-attention self.num_points_cross = num_points_cross self.num_points_self = num_points_self uniform_d = torch.linspace(0, size_d - 1, num_points_cross[2]) hw_3d_grid = torch.cat([ hw_grid[..., [0, 1]].unsqueeze(2).expand(-1, -1, num_points_cross[2], -1), uniform_d.reshape(1, 1, -1, 1).expand(size_h, size_w, -1, -1)], dim=-1) ref_3d_hw = self.mapping.grid2meter(hw_3d_grid) # H, W, P0, 
3 uniform_w = torch.linspace(0, size_w - 1, num_points_cross[1]) zh_3d_grid = torch.cat([ zh_grid[..., :1].unsqueeze(2).expand(-1, -1, num_points_cross[1], -1), uniform_w.reshape(1, 1, -1, 1).expand(size_d, size_h, -1, -1), zh_grid[..., 2:].unsqueeze(2).expand(-1, -1, num_points_cross[1], -1) ], dim=-1) ref_3d_zh = self.mapping.grid2meter(zh_3d_grid) # Z, H, P1, 3 uniform_h = torch.linspace(0, size_h - 1, num_points_cross[0]) wz_3d_grid = torch.cat([ uniform_h.reshape(1, 1, -1, 1).expand(size_w, size_d, -1, -1), wz_grid[..., [1, 2]].unsqueeze(2).expand(-1, -1, num_points_cross[0], -1) ], dim=-1) ref_3d_wz = self.mapping.grid2meter(wz_3d_grid) # W, Z, P2, 3 self.register_buffer('ref_3d_hw', ref_3d_hw.flatten(0, 1).transpose(0, 1), False) self.register_buffer('ref_3d_zh', ref_3d_zh.flatten(0, 1).transpose(0, 1), False) self.register_buffer('ref_3d_wz', ref_3d_wz.flatten(0, 1).transpose(0, 1), False) cross_view_ref_points = get_cross_view_ref_points(size_h, size_w, size_d, num_points_self) self.register_buffer('cross_view_ref_points', cross_view_ref_points, False) # hw_grid_normed = hw_grid[..., [0, 1]].clone() # hw_grid_normed[..., 0] = hw_grid_normed[..., 0] / (size_h - 1) # hw_grid_normed[..., 1] = hw_grid_normed[..., 1] / (size_w - 1) # zh_grid_normed = zh_grid[..., [2, 0]].clone() # zh_grid_normed[..., 0] = zh_grid_normed[..., 0] / (size_d - 1) # zh_grid_normed[..., 1] = zh_grid_normed[..., 1] / (size_h - 1) # wz_grid_normed = wz_grid[..., [1, 2]].clone() # wz_grid_normed[..., 0] = wz_grid_normed[..., 0] / (size_w - 1) # wz_grid_normed[..., 1] = wz_grid_normed[..., 1] / (size_d - 1) # self.register_buffer('ref_2d_hw', hw_grid_normed, False) # H, W, 2 # self.register_buffer('ref_2d_zh', zh_grid_normed, False) # H, W, 2 # self.register_buffer('ref_2d_wz', wz_grid_normed, False) # H, W, 2 def init_weights(self): """Initialize the transformer weights.""" for p in self.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p) for m in self.modules():
if isinstance(m, BEVCrossAttention) or \
4
2023-11-20 12:49:14+00:00
16k
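The TPVFormerEncoder record above builds its hw/zh/wz index grids with torch.arange plus unsqueeze/expand rather than torch.meshgrid; each grid stacks per-axis indices into a trailing coordinate dimension that grid2meter later converts to metric coordinates. A minimal sketch of the hw_grid pattern from the record; the sizes here are arbitrary stand-ins for the mapping's size_h / size_w:

import torch

size_h, size_w = 4, 3  # stand-ins for self.mapping.size_h / size_w
hw_grid = torch.stack(
    [torch.arange(size_h, dtype=torch.float).unsqueeze(-1).expand(-1, size_w),  # h index per cell
     torch.arange(size_w, dtype=torch.float).unsqueeze(0).expand(size_h, -1),   # w index per cell
     torch.zeros(size_h, size_w)],                                              # d index held at zero
    dim=-1)
print(hw_grid.shape)  # torch.Size([4, 3, 3]) -> (H, W, coord)
print(hw_grid[2, 1])  # tensor([2., 1., 0.])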
MobileTeleSystems/CoolGraph
cool_graph/runners.py
[ { "identifier": "RawDataProcessor", "path": "cool_graph/data/data_processor.py", "snippet": "class RawDataProcessor:\n \"\"\"\n Preprocessing datasets.\n\n Args:\n groups_names (Dict[int, str]): Name of groups in nodes.\n group_names_node_features (Dict[str, List[str]]): Name of f...
import os import pathlib import hydra import numpy as np import optuna import pandas as pd import torch from datetime import datetime from itertools import product from pathlib import Path from typing import Dict, List, Literal, Optional from hydra import ( compose, core, initialize, initialize_config_dir, initialize_config_module, ) from omegaconf import DictConfig, OmegaConf from optuna.trial import TrialState from sklearn.model_selection import train_test_split from torch_geometric.data import Data from torch_geometric.loader import NeighborLoader, NeighborSampler from tqdm import tqdm from cool_graph.data import RawDataProcessor from cool_graph.data.batch import get_auto_batch_size from cool_graph.data.loaders import create_loaders from cool_graph.logging import setup_mlflow_from_config from cool_graph.parameter_search import ( model_params_to_trial_params, sample_model_params, ) from cool_graph.train import Trainer
12,129
>>> runner = HypeRunner(data) >>> result = runner.run(optimize_run) Study statistics: Number of finished trials: 5 Number of complete trials: 5 Best trial: Value: 0.922 Params: {'conv_type': 'GraphConv', 'activation': 'leakyrelu', 'lin_prep_len': 1, 'lin_prep_dropout_rate': 0.4, 'lin_prep_weight_norm_flag': True, 'lin_prep_size_common': 512, 'lin_prep_sizes': [256], 'n_hops': 2, 'conv1_aggrs': {'mean': 128, 'max': 64, 'add': 32}, 'conv1_dropout_rate': 0.2, 'conv2_aggrs': {'mean': 64, 'max': 32, 'add': 16}, 'conv2_dropout_rate': 0.2, 'graph_conv_weight_norm_flag': True} """ def __init__( self, data: Data, config: Optional[DictConfig] = None, config_path: Optional[str] = None, overrides: Optional[List] = None, train_size: Optional[int] = None, test_size: Optional[int] = None, seed: Optional[int] = None, train_idx: Optional[List[int]] = None, test_idx: Optional[List[int]] = None, ): super().__init__( data, config, config_path, overrides, train_size, test_size, seed, train_idx, test_idx, ) if config is None: if config_path is None: config_path = os.path.join( os.path.dirname(__file__), "./config/in_memory_data.yaml" ) config = create_cfg( config=config_path, overrides=overrides, path_base="cfg" ) self.study = optuna.study def optimize_run( self, n_trials: int = 100, storage: Optional[str] = None, study_name: Optional[str] = None, enqueue_trial: Optional[List[Dict]] = None, ) -> pd.DataFrame: if not (hasattr(self, "train_loader") and hasattr(self, "test_loader")): self.init_loaders() """ Method for running objective function in Optuna. Args: n_trials (int, optional): The number of trials for each process. None represents no limit in terms of the number of trials. Defaults to 100. storage (Optional[str], optional): Database URL. If this argument is set to None, in-memory storage is used, and the Study will not be persistent. Defaults to None. study_name (Optional[str], optional): Study name. If this argument is set to None, a unique name is generated automatically. Defaults to None. enqueue_trial (Optional[List[Dict]], optional): Enqueue a trial with given parameter values. Defaults to None. Returns: trials_dataset (pd.DataFrame): Result dataframe with trial params. 
""" list_with_params = [] def objective(trial) -> float: self.cfg["model_params"] = sample_model_params( trial, conv_type=self.cfg["model_params"]["conv_type"] ) list_with_params.append(self.cfg["model_params"]) self.trainer = Trainer( self.train_loader, self.test_loader, self.chkpt_dir, device=self.cfg["training"]["device"], eval_freq=self.cfg["training"]["eval_freq"], fill_value=self.cfg["training"]["loss"].get("fill_value"), initial_lr=self.cfg["training"].get("initial_lr", 0.01), weight_decay=self.cfg["training"].get("weight_decay", 0.0), loss_name=self.cfg["training"]["loss"]["name"], loss_label_smoothing=self.cfg["training"]["loss"].get( "label_smoothing", False ), loss_target_weights=self.target_weights, loss_group_weights=self.cfg["training"]["loss"].get("group_weights"), groups_names=self.groups_names, mlflow_experiment_name=self.cfg["logging"].get( "mlflow_experiment_name" ), n_epochs=self.cfg["training"].get("n_epochs"), scheduler_params=self.cfg["training"].get("scheduler_params", {}), scheduler_type=self.cfg["training"].get("scheduler_type"), target_names=self.target_names, use_mlflow=self.cfg["logging"].get("use_mlflow", False), tqdm_disable=False, target_sizes=self.target_sizes, **self.cfg["model_params"], groups_names_num_features=self.groups_names_num_features, num_edge_features=self.num_edge_features, metrics=self.metrics, log_all_metrics=False, ) result = self.trainer.train() output = result["best_loss"]["main_metric"] output = round(output, 3) return output # default params for the 1st trial in Optuna optimization
def create_cfg(config: str, overrides: List[str], path_base: str = "cfg") -> Dict: assert path_base in ("cfg", "cwd") core.global_hydra.GlobalHydra.instance().clear() if os.path.isabs(config): config_path = pathlib.Path(config).parent else: config_path = pathlib.Path(os.getcwd()) / pathlib.Path(config).parent config_name = pathlib.Path(config).name.replace(".yaml", "") initialize_config_dir(str(config_path), version_base=None) cfg = compose(config_name=config_name, overrides=overrides) return cfg class ConfigRunner: r"""Runner for cli mode. Using only in cli. This class allows to load data + split data per batchs + split data per train/val + training. See the config full.yaml in ./config for knowing what excactly using as data/logging/model_params/training/metrics. You can use default params, but also you can change it. Steps for changing confis: - make get_config --configs path_where_you_need_configs (default: new path ./configs by itself) """ def __init__(self, config: Optional[DictConfig]) -> None: cfg = OmegaConf.to_container(config, resolve=True) self.cfg = cfg self.target_names = cfg["training"]["targets"] self.groups_names = cfg["data"]["groups_names"] self.target_weights = cfg["training"]["loss"]["target_weights"] self.read_edge_attr = cfg["data"].get("read_edge_attr", True) self.batch_size = cfg["training"]["batch_size"] self.group_mask_col = cfg["data"]["group_mask_col"] self.label_mask_col = cfg["data"]["label_mask_col"] self.label_cols = cfg["data"]["label_cols"] self.label_index_col = cfg["data"]["label_index_col"] self.edge_index_cols = cfg["data"]["edge_index_cols"] self.num_neighbors = cfg["training"]["num_neighbors"] self.features_edges_names = cfg["data"].get("features_edges") self.group_names_node_features = cfg["data"]["features"] self.train_paths = cfg["data"]["train"] self.val_paths = cfg["data"]["validation"] self.metrics = cfg["metrics"] self.chkpt_dir = ( pathlib.Path(cfg["logging"]["checkpoint_dir"]) / str(datetime.now())[:19] ) os.makedirs(self.chkpt_dir, exist_ok=True) if self.cfg["logging"].get("use_mlflow", False): setup_mlflow_from_config(cfg["logging"]["mlflow"]) def init_loaders(self) -> None: """ Using RawDataProcessor from cool_graph.data for preprocessing data from disk. """ self.train_sampler = RawDataProcessor( self.groups_names, self.group_names_node_features, mon_nodes_path=self.train_paths["nodes_path"], mon_edges_path=self.train_paths["edges_path"], mon_labels_path=self.train_paths["labels_path"], edge_index_cols=self.edge_index_cols, label_index_col=self.label_index_col, label_mask_col=self.label_mask_col, read_edge_attr=self.read_edge_attr, group_mask_col=self.group_mask_col, features_edges_names=self.features_edges_names, label_cols=self.label_cols, target_names=self.target_names, ) self.val_sampler = RawDataProcessor( self.groups_names, self.group_names_node_features, mon_nodes_path=self.val_paths["nodes_path"], mon_edges_path=self.val_paths["edges_path"], mon_labels_path=self.val_paths["labels_path"], edge_index_cols=self.edge_index_cols, label_index_col=self.label_index_col, label_mask_col=self.label_mask_col, read_edge_attr=self.read_edge_attr, group_mask_col=self.group_mask_col, features_edges_names=self.features_edges_names, label_cols=self.label_cols, target_names=self.target_names, ) def sample_data( self, seed=0 ) -> Dict[Literal["train", "validation"], List[torch.utils.data.DataLoader]]: """ Sampling data in batches. 
""" if self.batch_size == "auto": self._batch_size = get_auto_batch_size( [len(v) for _, v in self.group_names_node_features.items()], conv_type=self.cfg["model_params"]["conv_type"], conv1_aggrs=self.cfg["model_params"]["conv1_aggrs"], conv2_aggrs=self.cfg["model_params"].get("conv2_aggrs"), conv3_aggrs=self.cfg["model_params"].get("conv3_aggrs"), n_hops=self.cfg["model_params"]["n_hops"], lin_prep_size_common=self.cfg["model_params"]["lin_prep_size_common"], lin_prep_sizes=self.cfg["model_params"]["lin_prep_sizes"], edge_attr_repr_sizes=self.cfg["model_params"].get( "edge_attr_repr_sizes" ), num_edge_features=len(self.cfg["data"].get("features_edges", [])), device=self.cfg["training"]["device"], num_neighbors=self.cfg["training"]["num_neighbors"], ) else: self._batch_size = self.batch_size train_loaders = self.train_sampler.sample_data( self.num_neighbors, self._batch_size, seed=seed ) val_loaders = self.val_sampler.sample_data( self.num_neighbors, self._batch_size, seed=seed ) return {"train": train_loaders, "validation": val_loaders} def run(self, seed: int = 0) -> Dict[str, float]: """ Train model for train_samples and val_sampler. Args: seed (int): seed for training. Default to 0. Returns: result (dict): Result of training for each 5 epochs with metrics from config. """ if not (hasattr(self, "train_sampler") and hasattr(self, "val_sampler")): self.init_loaders() sampled = self.sample_data(seed=seed) train_loaders = sampled["train"] val_loaders = sampled["validation"] self.trainer = Trainer( train_loaders, val_loaders, self.chkpt_dir, device=self.cfg["training"]["device"], eval_freq=self.cfg["training"]["eval_freq"], fill_value=self.cfg["training"]["loss"].get("fill_value"), initial_lr=self.cfg["training"].get("initial_lr", 0.01), weight_decay=self.cfg["training"].get("weight_decay", 0.0), loss_name=self.cfg["training"]["loss"]["name"], loss_label_smoothing=self.cfg["training"]["loss"].get( "label_smoothing", False ), loss_target_weights=self.cfg["training"]["loss"].get("target_weights"), loss_group_weights=self.cfg["training"]["loss"].get("group_weights"), groups_names=self.cfg["data"]["groups_names"], mlflow_experiment_name=self.cfg["logging"].get("mlflow_experiment_name"), n_epochs=self.cfg["training"].get("n_epochs"), scheduler_params=self.cfg["training"].get("scheduler_params", {}), scheduler_type=self.cfg["training"].get("scheduler_type"), target_names=self.cfg["training"]["targets"], use_mlflow=self.cfg["logging"].get("use_mlflow", False), tqdm_disable=False, **self.cfg["model_params"], groups_names_num_features={ k: len(v) for k, v in self.group_names_node_features.items() }, num_edge_features=len(self.cfg["data"].get("features_edges", [])), metrics=self.metrics, ) result = self.trainer.train() return result class BaseRunner: def __init__( self, data: Data, config: Optional[DictConfig] = None, config_path: Optional[str] = None, overrides: Optional[List] = None, train_size: Optional[int] = None, test_size: Optional[int] = None, seed: Optional[int] = None, train_idx: Optional[List[int]] = None, test_idx: Optional[List[int]] = None, use_edge_attr: bool = False, **kwargs, ) -> None: """ Main class for Basic runner and Runner with Optuna. Args: data (Data): A data object describing a homogeneous graph. The data object can hold node-level, link-level and graph-level attributes. In general, Data tries to mimic the behavior of a regular Python dictionary. In addition, it provides useful functionality for analyzing graph structures, and provides basic PyTorch tensor functionalities. 
https://pytorch-geometric.readthedocs.io/en/latest/get_started/introduction.html#data-handling-of-graphs config (DictConfig): Config. Defaults to None. config_path (str): Path to config. Defaults to None. overrides (list): Own params. Can ba params from configs and overrides. Defaults to None. train_size (int): Size for train data. Defaults to None. test_size (int): Size for test data. Defaults to None. seed (int): Seed param for training. Defaults to None. train_idx (list): Indices for train data. Defaults to None. test_idx (list): Indices for test data. Defaults to None. use_edge_attr (bool): If attributes exist on edges, it can be used in training. Defaults to False. """ if config is None: if config_path is None: if use_edge_attr: config_path = "./config/in_memory_data2.yaml" else: config_path = "./config/in_memory_data.yaml" config_path = os.path.join(os.path.dirname(__file__), config_path) config = create_cfg( config=config_path, overrides=overrides, path_base="cfg" ) cfg = OmegaConf.to_container(config, resolve=True) self.data = data self.cfg = cfg self.test_size = test_size self.train_size = train_size self.seed = seed self.train_idx = train_idx self.test_idx = test_idx self.use_edge_attr = use_edge_attr if use_edge_attr and data.edge_attr is None: raise BaseException( "data does not contain edge_attr, please set use_edge_attr=False" ) self.target_names = cfg["training"]["targets"] self.target_weights = cfg["training"]["loss"]["target_weights"] self.batch_size = cfg["training"]["batch_size"] self.num_neighbors = cfg["training"]["num_neighbors"] self.metrics = cfg["metrics"] self.data.group_mask = torch.zeros(len(data.x), dtype=torch.int8) self.data.label_mask = torch.ones(len(data.x), dtype=torch.bool) self.groups_names = {0: "x"} self.groups_names_num_features = {"x": data.x.shape[1]} if len(data.y.shape) == 2: self.target_sizes = [] self.target_names = [] self.target_weights = {} for i in range(data.y.shape[1]): y_sub = data.y[:, i] setattr(data, f"y{i}", y_sub) self.target_sizes.append(len(y_sub.unique())) self.target_names.append(f"y{i}") self.target_weights[f"y{i}"] = 1 else: self.target_names = ["y"] self.target_sizes = [len(data.y.unique())] self.target_weights = {"y": 1} if use_edge_attr: self.num_edge_features = data.edge_attr.shape[1] else: self.num_edge_features = 0 self.chkpt_dir = ( pathlib.Path(cfg["logging"]["checkpoint_dir"]) / str(datetime.now())[:19] ) for k, v in kwargs.items(): setattr(self, k, v) if self.cfg["logging"].get("use_mlflow", False): setup_mlflow_from_config(cfg["logging"]["mlflow"]) def init_loaders(self) -> None: """ Sampling data into batches and sampling data with NeighborLoader into list loaders. 
""" if self.batch_size == "auto": self._batch_size = get_auto_batch_size( [ self.groups_names_num_features[self.groups_names[i]] for i in range(len(self.groups_names)) ], conv_type=self.cfg["model_params"]["conv_type"], conv1_aggrs=self.cfg["model_params"]["conv1_aggrs"], conv2_aggrs=self.cfg["model_params"].get("conv2_aggrs"), conv3_aggrs=self.cfg["model_params"].get("conv3_aggrs"), n_hops=self.cfg["model_params"]["n_hops"], lin_prep_size_common=self.cfg["model_params"]["lin_prep_size_common"], lin_prep_sizes=self.cfg["model_params"]["lin_prep_sizes"], edge_attr_repr_sizes=self.cfg["model_params"].get( "edge_attr_repr_sizes" ), num_edge_features=self.num_edge_features, device=self.cfg["training"]["device"], num_neighbors=self.num_neighbors, ) else: self._batch_size = self.batch_size if (self.train_idx is None) or (self.test_idx is None): train_idx, test_idx = train_test_split( torch.nonzero(self.data.label_mask)[:, 0], train_size=self.train_size, test_size=self.test_size, random_state=self.seed, shuffle=True, ) self.train_idx = train_idx self.test_idx = test_idx def sample_date_prerpoc(sampled_data: Data) -> Data: sampled_data.label_mask[sampled_data.batch_size :] = False for group, name in self.groups_names.items(): x = getattr(sampled_data, name)[sampled_data.group_mask == group] setattr(sampled_data, name, x) return sampled_data loader_train = NeighborLoader( self.data, num_neighbors=self.num_neighbors, batch_size=self._batch_size, shuffle=True, input_nodes=self.train_idx, ) list_loader_train = [] for sampled_data in tqdm(loader_train, desc="Sample data"): list_loader_train.append(sample_date_prerpoc(sampled_data)) self.train_loader = list_loader_train loader_test = NeighborLoader( self.data, num_neighbors=self.num_neighbors, batch_size=self._batch_size, shuffle=True, input_nodes=self.test_idx, ) list_loader_test = [] for sampled_data in tqdm(loader_test, desc="Sample data"): list_loader_test.append(sample_date_prerpoc(sampled_data)) self.test_loader = list_loader_test class Runner(BaseRunner): """ Runner for notebook launch. Args: data (Data): A data object describing a homogeneous graph. The data object can hold node-level, link-level and graph-level attributes. In general, Data tries to mimic the behavior of a regular Python dictionary. In addition, it provides useful functionality for analyzing graph structures, and provides basic PyTorch tensor functionalities. https://pytorch-geometric.readthedocs.io/en/latest/get_started/introduction.html#data-handling-of-graphs config (DictConfig): Config. Defaults to None. config_path (str): Path to config. Defaults to None. overrides (list): Own params. Can ba params from configs and overrides. Defaults to None. train_size (int): Size for train data. Defaults to None. test_size (int): Size for test data. Defaults to None. seed (int): Seed param for training. Defaults to None. train_idx (int): Indices for train data. Defaults to None. test_idx (int): Indices for test data. Defaults to None. use_edge_attr (bool): If attributes exist on edges, it can be used in training. Defaults to False. 
Examples -------- >>> from cool_graph.runners import Runner >>> from torch_geometric import datasets >>> # loading amazon dataset >>> data = datasets.Amazon(root="./data/Amazon", name="Computers").data >>> runner = Runner(data) >>> result = runner.run() >>> result["best_loss"] {'accuracy': 0.916, 'cross_entropy': 0.286, 'f1_micro': 0.916, 'calc_time': 0.004, 'main_metric': 0.916, 'epoch': 10} Also you can override params in Runner: runner = Runner(data, metrics=['accuracy'], batch_size='auto', train_size=0.7, test_size=0.3, overrides=['training.n_epochs=1'], config_path=path/to/config) result = runner.run() """ def __init__( self, data: Data, config: Optional[DictConfig] = None, config_path: Optional[str] = None, overrides: Optional[List] = None, train_size: Optional[int] = None, test_size: Optional[int] = None, seed: Optional[int] = None, train_idx: Optional[List[int]] = None, test_idx: Optional[List[int]] = None, use_edge_attr: bool = False, **kwargs, ): super().__init__( data, config, config_path, overrides, train_size, test_size, seed, train_idx, test_idx, use_edge_attr, **kwargs, ) def run(self) -> Dict[str, float]: """ Training model with params in_memory_data/in_memory_data2 config. See the configs in ./config for knowing what excactly using as logging/model_params/training/metrics. You can use default params, but also you can change it. Steps for changing confis: - make get_config --configs path_where_you_need_configs (default: new path ./configs by itself) """ if not (hasattr(self, "train_loader") and hasattr(self, "test_loader")): self.init_loaders() self.trainer = Trainer( self.train_loader, self.test_loader, self.chkpt_dir, device=self.cfg["training"]["device"], eval_freq=self.cfg["training"]["eval_freq"], fill_value=self.cfg["training"]["loss"].get("fill_value"), initial_lr=self.cfg["training"].get("initial_lr", 0.01), weight_decay=self.cfg["training"].get("weight_decay", 0.0), loss_name=self.cfg["training"]["loss"]["name"], loss_label_smoothing=self.cfg["training"]["loss"].get( "label_smoothing", False ), loss_target_weights=self.target_weights, loss_group_weights=self.cfg["training"]["loss"].get("group_weights"), groups_names=self.groups_names, mlflow_experiment_name=self.cfg["logging"].get("mlflow_experiment_name"), n_epochs=self.cfg["training"].get("n_epochs"), scheduler_params=self.cfg["training"].get("scheduler_params", {}), scheduler_type=self.cfg["training"].get("scheduler_type"), target_names=self.target_names, use_mlflow=self.cfg["logging"].get("use_mlflow", False), tqdm_disable=False, target_sizes=self.target_sizes, **self.cfg["model_params"], groups_names_num_features=self.groups_names_num_features, num_edge_features=self.num_edge_features, metrics=self.metrics, log_all_metrics=False, ) result = self.trainer.train() return result class HypeRunner(BaseRunner): """ Runner for optimization model with Optuna. https://optuna.readthedocs.io/en/stable/reference/index.html 1st trial - with default config params (hyper_params). Also, 2nd trial - you can add own trial as argument enqueue_trial in optimazire_run method, and next trial optuna optimize model params randomly, if set None randomly optimization after 1st default trial. Args: data (Data): Loaded dataset. config (DictConfig): Confif with patams (model_params, logging, training, metrics). Default to None. config_path (str): Path with config structure (can be loaded with cli get_config). Default to None. overrides (list): Own params in list. Default to None. train_size (int): Own train size. Default to None. 
        test_size (int): Own test size. Defaults to None.
        seed (int): The desired seed. Defaults to None.
        train_idx (list): List of train indices.
        test_idx (list): List of test indices.

    Examples
    --------
    >>> from cool_graph.runners import HypeRunner
    >>> from torch_geometric import datasets
    >>> # loading amazon dataset
    >>> data = datasets.Amazon(root="./data/Amazon", name="Computers").data
    >>> runner = HypeRunner(data)
    >>> result = runner.optimize_run()
    Study statistics:
        Number of finished trials: 5
        Number of complete trials: 5
    Best trial:
        Value: 0.922
    Params:
    {'conv_type': 'GraphConv', 'activation': 'leakyrelu', 'lin_prep_len': 1, 'lin_prep_dropout_rate': 0.4,
     'lin_prep_weight_norm_flag': True, 'lin_prep_size_common': 512, 'lin_prep_sizes': [256], 'n_hops': 2,
     'conv1_aggrs': {'mean': 128, 'max': 64, 'add': 32}, 'conv1_dropout_rate': 0.2,
     'conv2_aggrs': {'mean': 64, 'max': 32, 'add': 16}, 'conv2_dropout_rate': 0.2,
     'graph_conv_weight_norm_flag': True}
    """

    def __init__(
        self,
        data: Data,
        config: Optional[DictConfig] = None,
        config_path: Optional[str] = None,
        overrides: Optional[List] = None,
        train_size: Optional[int] = None,
        test_size: Optional[int] = None,
        seed: Optional[int] = None,
        train_idx: Optional[List[int]] = None,
        test_idx: Optional[List[int]] = None,
    ):
        super().__init__(
            data,
            config,
            config_path,
            overrides,
            train_size,
            test_size,
            seed,
            train_idx,
            test_idx,
        )
        if config is None:
            if config_path is None:
                config_path = os.path.join(
                    os.path.dirname(__file__), "./config/in_memory_data.yaml"
                )
            config = create_cfg(
                config=config_path, overrides=overrides, path_base="cfg"
            )
        self.study = optuna.study

    def optimize_run(
        self,
        n_trials: int = 100,
        storage: Optional[str] = None,
        study_name: Optional[str] = None,
        enqueue_trial: Optional[List[Dict]] = None,
    ) -> pd.DataFrame:
        """
        Runs the objective function with Optuna.

        Args:
            n_trials (int, optional): The number of trials for each process. None
                represents no limit in terms of the number of trials. Defaults to 100.
            storage (Optional[str], optional): Database URL. If this argument is set
                to None, in-memory storage is used, and the Study will not be
                persistent. Defaults to None.
            study_name (Optional[str], optional): Study name. If this argument is set
                to None, a unique name is generated automatically. Defaults to None.
            enqueue_trial (Optional[List[Dict]], optional): Enqueue a trial with given
                parameter values. Defaults to None.

        Returns:
            trials_dataset (pd.DataFrame): Result dataframe with trial params.
        """
        if not (hasattr(self, "train_loader") and hasattr(self, "test_loader")):
            self.init_loaders()

        list_with_params = []

        def objective(trial) -> float:
            self.cfg["model_params"] = sample_model_params(
                trial, conv_type=self.cfg["model_params"]["conv_type"]
            )
            list_with_params.append(self.cfg["model_params"])
            self.trainer = Trainer(
                self.train_loader,
                self.test_loader,
                self.chkpt_dir,
                device=self.cfg["training"]["device"],
                eval_freq=self.cfg["training"]["eval_freq"],
                fill_value=self.cfg["training"]["loss"].get("fill_value"),
                initial_lr=self.cfg["training"].get("initial_lr", 0.01),
                weight_decay=self.cfg["training"].get("weight_decay", 0.0),
                loss_name=self.cfg["training"]["loss"]["name"],
                loss_label_smoothing=self.cfg["training"]["loss"].get(
                    "label_smoothing", False
                ),
                loss_target_weights=self.target_weights,
                loss_group_weights=self.cfg["training"]["loss"].get("group_weights"),
                groups_names=self.groups_names,
                mlflow_experiment_name=self.cfg["logging"].get(
                    "mlflow_experiment_name"
                ),
                n_epochs=self.cfg["training"].get("n_epochs"),
                scheduler_params=self.cfg["training"].get("scheduler_params", {}),
                scheduler_type=self.cfg["training"].get("scheduler_type"),
                target_names=self.target_names,
                use_mlflow=self.cfg["logging"].get("use_mlflow", False),
                tqdm_disable=False,
                target_sizes=self.target_sizes,
                **self.cfg["model_params"],
                groups_names_num_features=self.groups_names_num_features,
                num_edge_features=self.num_edge_features,
                metrics=self.metrics,
                log_all_metrics=False,
            )
            result = self.trainer.train()
            output = result["best_loss"]["main_metric"]
            output = round(output, 3)
            return output

        # default params for the 1st trial in Optuna optimization
trial_params = model_params_to_trial_params(**self.cfg["model_params"])
4
2023-11-22 09:44:16+00:00
16k
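The HypeRunner row above hinges on Optuna's enqueue_trial: the known-good config defaults become the first trial before the sampler takes over. A minimal runnable sketch of that pattern, with hypothetical parameter names and a toy objective standing in for the real model_params and Trainer:

import optuna

# Assumed defaults mirroring a config's model_params section (illustrative values only).
default_trial = {"lin_prep_size_common": 512, "n_hops": 2, "lin_prep_dropout_rate": 0.4}

def objective(trial):
    size = trial.suggest_categorical("lin_prep_size_common", [128, 256, 512])
    hops = trial.suggest_int("n_hops", 1, 3)
    dropout = trial.suggest_float("lin_prep_dropout_rate", 0.0, 0.5)
    # Stand-in for Trainer(...).train(); a real objective would return the main metric.
    return 1.0 - dropout + 0.01 * hops + size / 10000.0

study = optuna.create_study(direction="maximize")
study.enqueue_trial(default_trial)     # 1st trial replays the known defaults
study.optimize(objective, n_trials=5)  # remaining trials are sampled by Optuna
print(study.best_trial.params)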
HeliosZhao/Animate124
nerf/network_grid_tcnn.py
[ { "identifier": "trunc_exp", "path": "activation.py", "snippet": "class _trunc_exp(Function):\n def forward(ctx, x):\n def backward(ctx, g):\ndef biased_softplus(x, bias=0):" }, { "identifier": "NeRFRenderer", "path": "nerf/renderer.py", "snippet": "class NeRFRenderer(nn.Module):\n...
import torch import torch.nn as nn import torch.nn.functional as F import numpy as np import tinycudann as tcnn from activation import trunc_exp, biased_softplus from .renderer import NeRFRenderer from encoding import get_encoder from .utils import safe_normalize
13,479
class MLP(nn.Module): def __init__(self, dim_in, dim_out, dim_hidden, num_layers, bias=True): super().__init__() self.dim_in = dim_in self.dim_out = dim_out self.dim_hidden = dim_hidden self.num_layers = num_layers net = [] for l in range(num_layers): net.append(nn.Linear(self.dim_in if l == 0 else self.dim_hidden, self.dim_out if l == num_layers - 1 else self.dim_hidden, bias=bias)) self.net = nn.ModuleList(net) def forward(self, x): for l in range(self.num_layers): x = self.net[l](x) if l != self.num_layers - 1: x = F.relu(x, inplace=True) return x class NeRFNetwork(NeRFRenderer): def __init__(self, opt, num_layers=3, hidden_dim=64, num_layers_bg=2, hidden_dim_bg=32, ): super().__init__(opt) self.num_layers = num_layers self.hidden_dim = hidden_dim self.encoder = tcnn.Encoding( n_input_dims=3, encoding_config={ "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "interpolation": "Smoothstep", "per_level_scale": np.exp2(np.log2(2048 * self.bound / 16) / (16 - 1)), }, dtype=torch.float32, # ENHANCE: default float16 seems unstable... ) self.in_dim = self.encoder.n_output_dims # use torch MLP, as tcnn MLP doesn't impl second-order derivative self.sigma_net = MLP(self.in_dim, 4, hidden_dim, num_layers, bias=True) self.density_activation = trunc_exp if self.opt.density_activation == 'exp' else biased_softplus # background network if self.opt.bg_radius > 0: self.num_layers_bg = num_layers_bg self.hidden_dim_bg = hidden_dim_bg # use a very simple network to avoid it learning the prompt...
class MLP(nn.Module): def __init__(self, dim_in, dim_out, dim_hidden, num_layers, bias=True): super().__init__() self.dim_in = dim_in self.dim_out = dim_out self.dim_hidden = dim_hidden self.num_layers = num_layers net = [] for l in range(num_layers): net.append(nn.Linear(self.dim_in if l == 0 else self.dim_hidden, self.dim_out if l == num_layers - 1 else self.dim_hidden, bias=bias)) self.net = nn.ModuleList(net) def forward(self, x): for l in range(self.num_layers): x = self.net[l](x) if l != self.num_layers - 1: x = F.relu(x, inplace=True) return x class NeRFNetwork(NeRFRenderer): def __init__(self, opt, num_layers=3, hidden_dim=64, num_layers_bg=2, hidden_dim_bg=32, ): super().__init__(opt) self.num_layers = num_layers self.hidden_dim = hidden_dim self.encoder = tcnn.Encoding( n_input_dims=3, encoding_config={ "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "interpolation": "Smoothstep", "per_level_scale": np.exp2(np.log2(2048 * self.bound / 16) / (16 - 1)), }, dtype=torch.float32, # ENHANCE: default float16 seems unstable... ) self.in_dim = self.encoder.n_output_dims # use torch MLP, as tcnn MLP doesn't impl second-order derivative self.sigma_net = MLP(self.in_dim, 4, hidden_dim, num_layers, bias=True) self.density_activation = trunc_exp if self.opt.density_activation == 'exp' else biased_softplus # background network if self.opt.bg_radius > 0: self.num_layers_bg = num_layers_bg self.hidden_dim_bg = hidden_dim_bg # use a very simple network to avoid it learning the prompt...
self.encoder_bg, self.in_dim_bg = get_encoder('frequency', input_dim=3, multires=6)
2
2023-11-23 10:34:08+00:00
16k
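The per_level_scale expression in the row above, np.exp2(np.log2(2048 * self.bound / 16) / (16 - 1)), is one common way to make a multi-resolution hash grid span a base and a target resolution geometrically. A small numpy-only sketch (assuming bound = 1, so the desired finest resolution is 2048):

import numpy as np

n_levels, base_resolution, desired_resolution = 16, 16, 2048
# Growth factor so that level 0 is base_resolution and level n_levels-1 is desired_resolution.
per_level_scale = np.exp2(np.log2(desired_resolution / base_resolution) / (n_levels - 1))
resolutions = base_resolution * per_level_scale ** np.arange(n_levels)
print(round(float(per_level_scale), 4))   # ~1.3819 growth per level
print(resolutions.round().astype(int))    # 16 ... 2048, geometrically spaced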
alexzhou907/DreamPropeller
extern/ldm_zero123/models/diffusion/ddpm.py
[ { "identifier": "AutoencoderKL", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(\n self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"i...
import itertools import numpy as np import pytorch_lightning as pl import torch import torch.nn as nn from contextlib import contextmanager, nullcontext from functools import partial from einops import rearrange, repeat from omegaconf import ListConfig from pytorch_lightning.utilities.rank_zero import rank_zero_only from torch.optim.lr_scheduler import LambdaLR from torchvision.utils import make_grid from tqdm import tqdm from extern.ldm_zero123.models.autoencoder import ( AutoencoderKL, IdentityFirstStage, VQModelInterface, ) from extern.ldm_zero123.models.diffusion.ddim import DDIMSampler from extern.ldm_zero123.modules.attention import CrossAttention from extern.ldm_zero123.modules.diffusionmodules.util import ( extract_into_tensor, make_beta_schedule, noise_like, ) from extern.ldm_zero123.modules.distributions.distributions import ( DiagonalGaussianDistribution, normal_kl, ) from extern.ldm_zero123.modules.ema import LitEma from extern.ldm_zero123.util import ( count_params, default, exists, instantiate_from_config, isimage, ismap, log_txt_as_img, mean_flat, )
12,692
padding=0, stride=(stride[0] // df, stride[1] // df), ) fold = torch.nn.Fold( output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2 ) weighting = self.get_weighting( kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view( 1, 1, h // df, w // df ) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx) ) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input( self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, uncond=0.05, ): x = super().get_input(batch, k) T = batch["T"].to(memory_format=torch.contiguous_format).float() if bs is not None: x = x[:bs] T = T[:bs].to(self.device) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() cond_key = cond_key or self.cond_stage_key xc = super().get_input(batch, cond_key).to(self.device) if bs is not None: xc = xc[:bs] cond = {} # To support classifier-free guidance, randomly drop out only text conditioning 5%, only image conditioning 5%, and both 5%. random = torch.rand(x.size(0), device=x.device) prompt_mask = rearrange(random < 2 * uncond, "n -> n 1 1") input_mask = 1 - rearrange( (random >= uncond).float() * (random < 3 * uncond).float(), "n -> n 1 1 1" ) null_prompt = self.get_learned_conditioning([""]) # z.shape: [8, 4, 64, 64]; c.shape: [8, 1, 768] # print('=========== xc shape ===========', xc.shape) with torch.enable_grad(): clip_emb = self.get_learned_conditioning(xc).detach() null_prompt = self.get_learned_conditioning([""]).detach() cond["c_crossattn"] = [ self.cc_projection( torch.cat( [ torch.where(prompt_mask, null_prompt, clip_emb), T[:, None, :], ], dim=-1, ) ) ] cond["c_concat"] = [ input_mask * self.encode_first_stage((xc.to(self.device))).mode().detach() ] out = [z, cond] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out # @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, "b h w c -> b c h w").contiguous() z = 1.0 / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold( z, ks, stride, uf=uf ) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0.0, v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1.0, conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0.0, make_it_fit=False, ucg_training=None, ): super().__init__() assert parameterization in [ "eps", "x0", ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode" ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if ckpt_path is not None: self.init_from_ckpt( ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet ) self.register_schedule( given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule( beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) alphas = 1.0 - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1]) (timesteps,) = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert ( alphas_cumprod.shape[0] == self.num_timesteps ), "alphas have to be defined for each timestep" to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer("betas", to_torch(betas)) self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod)) self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod))) self.register_buffer( "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod)) ) self.register_buffer( "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod)) ) self.register_buffer( "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod)) ) self.register_buffer( "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1)) ) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * ( 1.0 - alphas_cumprod_prev ) / (1.0 - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. 
- alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer("posterior_variance", to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer( "posterior_log_variance_clipped", to_torch(np.log(np.maximum(posterior_variance, 1e-20))), ) self.register_buffer( "posterior_mean_coef1", to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)), ) self.register_buffer( "posterior_mean_coef2", to_torch( (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod) ), ) if self.parameterization == "eps": lvlb_weights = self.betas**2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod) ) elif self.parameterization == "x0": lvlb_weights = ( 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2.0 * 1 - torch.Tensor(alphas_cumprod)) ) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer("lvlb_weights", lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) if self.make_it_fit: n_params = len( [ name for name, _ in itertools.chain( self.named_parameters(), self.named_buffers() ) ] ) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params, ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[ i % old_shape[0], j % old_shape[1] ] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = ( self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(sd, strict=False) ) print( f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" ) if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. 
:return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor( self.log_one_minus_alphas_cumprod, t, x_start.shape ) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor( self.posterior_log_variance_clipped, t, x_t.shape ) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1.0, 1.0) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t ) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance( x=x, t=t, clip_denoised=clip_denoised ) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm( reversed(range(0, self.num_timesteps)), desc="Sampling t", total=self.num_timesteps, ): img = self.p_sample( img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised, ) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop( (batch_size, channels, image_size, image_size), return_intermediates=return_intermediates, ) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise ) def get_loss(self, pred, target, mean=True): if self.loss_type == "l1": loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == "l2": if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction="none") else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = 
self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError( f"Paramterization {self.parameterization} not yet supported" ) loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = "train" if self.training else "val" loss_dict.update({f"{log_prefix}/loss_simple": loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f"{log_prefix}/loss_vlb": loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f"{log_prefix}/loss": loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint( 0, self.num_timesteps, (x.shape[0],), device=self.device ).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, "b h w c -> b c h w") x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict( loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True ) self.log( "global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False, ) if self.use_scheduler: lr = self.optimizers().param_groups[0]["lr"] self.log( "lr_abs", lr, prog_bar=True, logger=True, on_step=True, on_epoch=False ) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + "_ema": loss_dict_ema[key] for key in loss_dict_ema} self.log_dict( loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) self.log_dict( loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), "1 -> b", b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = 
self.sample( batch_size=N, return_intermediates=True ) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__( self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, unet_trainable=True, *args, **kwargs, ): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs["timesteps"] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = "concat" if concat_mode else "crossattn" if cond_stage_config == "__is_unconditional__": conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.unet_trainable = unet_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer("scale_factor", torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward # construct linear projection layer for concatenating image CLIP embedding and RT self.cc_projection = nn.Linear(772, 768) nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768]) nn.init.zeros_(list(self.cc_projection.parameters())[1]) self.cc_projection.requires_grad_(True) self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True def make_cond_schedule( self, ): self.cond_ids = torch.full( size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long, ) ids = torch.round( torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond) ).long() self.cond_ids[: self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if ( self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt ): assert ( self.scale_factor == 1.0 ), "rather not use custom rescaling and std-rescaling simultaneously" # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer("scale_factor", 1.0 / z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, 
linear_end=2e-2, cosine_s=8e-3, ): super().register_schedule( given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s ) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != "__is_first_stage__" assert config != "__is_unconditional__" model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list( self, samples, desc="", force_no_decoder_quantization=False ): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append( self.decode_first_stage( zd.to(self.device), force_not_quantize=force_no_decoder_quantization ) ) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError( f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented" ) return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, "encode") and callable( self.cond_stage_model.encode ): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min( torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1 )[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = torch.clip( weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 
1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip( L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"], ) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold( self, x, kernel_size, stride, uf=1, df=1 ): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting( kernel_size[0], kernel_size[1], Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict( kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf), ) fold = torch.nn.Fold( output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2 ) weighting = self.get_weighting( kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view( 1, 1, h * uf, w * uf ) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx) ) elif df > 1 and uf == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict( kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df), ) fold = torch.nn.Fold( output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2 ) weighting = self.get_weighting( kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view( 1, 1, h // df, w // df ) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx) ) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input( self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, uncond=0.05, ): x = super().get_input(batch, k) T = batch["T"].to(memory_format=torch.contiguous_format).float() if bs is not None: x = x[:bs] T = T[:bs].to(self.device) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() cond_key = cond_key or self.cond_stage_key xc = super().get_input(batch, cond_key).to(self.device) if bs is not None: xc = xc[:bs] cond = {} # To support classifier-free guidance, randomly drop out only text conditioning 5%, only image conditioning 5%, and both 5%. 
random = torch.rand(x.size(0), device=x.device) prompt_mask = rearrange(random < 2 * uncond, "n -> n 1 1") input_mask = 1 - rearrange( (random >= uncond).float() * (random < 3 * uncond).float(), "n -> n 1 1 1" ) null_prompt = self.get_learned_conditioning([""]) # z.shape: [8, 4, 64, 64]; c.shape: [8, 1, 768] # print('=========== xc shape ===========', xc.shape) with torch.enable_grad(): clip_emb = self.get_learned_conditioning(xc).detach() null_prompt = self.get_learned_conditioning([""]).detach() cond["c_crossattn"] = [ self.cc_projection( torch.cat( [ torch.where(prompt_mask, null_prompt, clip_emb), T[:, None, :], ], dim=-1, ) ) ] cond["c_concat"] = [ input_mask * self.encode_first_stage((xc.to(self.device))).mode().detach() ] out = [z, cond] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out # @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, "b h w c -> b c h w").contiguous() z = 1.0 / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold( z, ks, stride, uf=uf ) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim
if isinstance(self.first_stage_model, VQModelInterface):
2
2023-11-27 23:39:49+00:00
16k
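Most of the buffers register_schedule precomputes in the DDPM row above exist so the closed-form forward process x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps reduces to a couple of tensor lookups. A minimal sketch with a plain linspace beta schedule (illustrative only; the exact "linear" variant in make_beta_schedule may differ):

import torch

T = 1000
betas = torch.linspace(1e-4, 2e-2, T)               # linear_start/linear_end defaults above
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)  # alpha_bar_t

def q_sample(x_start, t, noise):
    a = alphas_cumprod[t].sqrt().view(-1, 1, 1, 1)
    s = (1.0 - alphas_cumprod[t]).sqrt().view(-1, 1, 1, 1)
    return a * x_start + s * noise

x0 = torch.randn(2, 3, 8, 8)
t = torch.tensor([10, 900])
xt = q_sample(x0, t, torch.randn_like(x0))
print(xt.shape)  # torch.Size([2, 3, 8, 8]); at t=900 the sample is nearly pure noise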
CineMingle/CineMingle
Movie_Data_Capture.py
[ { "identifier": "get_data_from_json", "path": "scraper.py", "snippet": "def get_data_from_json(\n file_number: str,\n open_cc: opencc.OpenCC,\n specified_source: str, specified_url: str) -> typing.Optional[dict]:\n \n # iterate through all services and fetch the data 从网站上查询片名解...
import argparse import json import os import random import re import sys import time import shutil import typing import urllib3 import signal import platform import config from datetime import datetime, timedelta from lxml import etree from pathlib import Path from opencc import OpenCC from scraper import get_data_from_json from ADC_function import file_modification_days, get_html, parallel_download_files from number_parser import get_number from core import core_main, core_main_no_net_op, moveFailedFolder, debug_print
13,407
            if debug:
                print('[!]Skip failed movie:', absf)
            continue
        is_sym = full_name.is_symlink()
        # short-circuit boolean: don't stat() symlinks, since they may point to nonexistent targets
        if main_mode != 3 and (is_sym or (full_name.stat().st_nlink > 1 and not conf.scan_hardlink())):
            continue  # when main mode != 3, skip symlinks, and skip hardlinks unless hardlink scraping is configured
        # allow 0-byte debug samples through; the disabled size check below would drop ad clips smaller than 120MB,
        # e.g. '苍老师强力推荐.mp4' (102.2MB), '黑道总裁.mp4' (98.4MB), '有趣的妹子激情表演.MP4' (95MB), '有趣的臺灣妹妹直播.mp4' (15.1MB)
        movie_size = 0 if is_sym else full_name.stat().st_size  # as above: no stat()/st_size on symlinks, assign 0 to skip the small-video check
        # if 0 < movie_size < 125829120:  # 1024*1024*120=125829120
        #     continue
        if cliRE and not cliRE.search(absf) or trailerRE.search(full_name.name):
            continue
        if main_mode == 3:
            nfo = full_name.with_suffix('.nfo')
            if not nfo.is_file():
                if debug:
                    print(f"[!]Metadata {nfo.name} not found for '{absf}'")
            elif nfo_skip_days > 0 and file_modification_days(nfo) <= nfo_skip_days:
                skip_nfo_days_cnt += 1
                if debug:
                    print(f"[!]Skip movie by its .nfo modified within {nfo_skip_days} days: '{absf}'")
                continue
        total.append(absf)

    if skip_failed_cnt:
        print(f"[!]Skip {skip_failed_cnt} movies in failed list '{failed_list_txt_path}'.")
    if skip_nfo_days_cnt:
        print(
            f"[!]Skip {skip_nfo_days_cnt} movies in source folder '{source}' whose .nfo was modified within {nfo_skip_days} days.")
    if nfo_skip_days <= 0 or not link_mode or main_mode == 3:
        return total

    # In symlink mode, movies already scraped successfully also need their .nfo age checked
    # in the success folder; skip the ones updated within the last N days.
    skip_numbers = set()
    success_folder = Path(conf.success_folder()).resolve()
    for f in success_folder.glob(r'**/*'):
        if not re.match(r'\.nfo$', f.suffix, re.IGNORECASE):
            continue
        if file_modification_days(f) > nfo_skip_days:
            continue
        number = get_number(False, f.stem)
        if not number:
            continue
        skip_numbers.add(number.lower())

    rm_list = []
    for f in total:
        n_number = get_number(False, os.path.basename(f))
        if n_number and n_number.lower() in skip_numbers:
            rm_list.append(f)
    for f in rm_list:
        total.remove(f)
        if debug:
            print(f"[!]Skip file successfully processed within {nfo_skip_days} days: '{f}'")
    if len(rm_list):
        print(
            f"[!]Skip {len(rm_list)} movies in success folder '{success_folder}' whose .nfo was modified within {nfo_skip_days} days.")

    return total


def create_failed_folder(failed_folder: str):
    """
    Create the failed folder.
    """
    if not os.path.exists(failed_folder):
        try:
            os.makedirs(failed_folder)
        except:
            print(f"[-]Fatal error! Can not make folder '{failed_folder}'")
            os._exit(0)


def rm_empty_folder(path):
    """
    Recursively removes empty folders from a given path. This function is useful for
    cleaning up the directory structure by removing folders that no longer contain any files.
    :param path: The path where empty folders will be searched for and removed.
    """
    abspath = os.path.abspath(path)
    deleted = set()
    for current_dir, subdirs, files in os.walk(abspath, topdown=False):
        try:
            still_has_subdirs = any(
                os.path.join(current_dir, subdir) not in deleted for subdir in subdirs
            )
            if not any(files) and not still_has_subdirs and not os.path.samefile(path, current_dir):
                os.rmdir(current_dir)
                deleted.add(current_dir)
                print('[+]Deleting empty folder', current_dir)
        except:
            pass


def create_data_and_move(movie_path: str, zero_op: bool, no_net_op: bool, oCC):
    """
    Processes a movie file, generates necessary data, and moves the file to an appropriate
    directory based on the outcome. This function is central to the application's file
    processing logic, including scraping, organizing, and error handling.
    :param movie_path: Path of the movie file to be processed.
    :param zero_op: A boolean flag indicating whether to perform a dry run (no actual file operations).
    :param no_net_op: A boolean flag to indicate whether network operations are to be skipped.
    :param oCC: An OpenCC instance for language conversion, if required.
    """
    # Normalized number, eg: 111xxx-222.mp4 -> xxx-222.mp4
    skip_file_names = config.getInstance().skip_file_names()
    debug = config.getInstance().debug()
    n_number = get_number(debug, os.path.basename(movie_path))
    movie_path = os.path.abspath(movie_path)
    # print(movie_path)
    for skip_name in skip_file_names:
        if skip_name in movie_path:
            print('[+]Skipping file:{}'.format(movie_path))
            return
    if debug is True:
        print(f"[!] [{n_number}] As Number Processing for '{movie_path}'")
    if zero_op:
        return
    if n_number:
        if no_net_op:
            core_main_no_net_op(movie_path, n_number)
        else:
def check_update(local_version): """ Check for updates by comparing the local version of the application with the latest version available on GitHub. It fetches the latest release information from GitHub and compares the version numbers. If a new version is available, it prints out the update information. :param local_version: The current local version of the application. """ htmlcode = get_html("https://api.github.com/repos/CineMingle/CineMingle/releases/latest") data = json.loads(htmlcode) remote = int(data["tag_name"].replace(".", "")) local_version = int(local_version.replace(".", "")) if local_version < remote: print("[*]" + ("* New update " + str(data["tag_name"]) + " *").center(54)) print("[*]" + "↓ Download ↓".center(54)) print("[*]https://github.com/CineMingle/CineMingle/releases") print("[*]======================================================") def argparse_function(ver: str) -> typing.Tuple[str, str, str, str, bool, bool, str, str]: """ Parses command-line arguments and returns the parsed values. It sets up the argument parser with various options for the application and returns the parsed arguments and their values. It also loads configuration from a config file. :param ver: The version of the application, used for the version argument. :return: A tuple containing various parsed arguments and flags. """ conf = config.getInstance() parser = argparse.ArgumentParser(epilog=f"Load Config file '{conf.ini_path}'.") parser.add_argument("file", default='', nargs='?', help="Single Movie file path.") parser.add_argument("-p", "--path", default='movies', nargs='?', help="Analysis folder path.") parser.add_argument("-m", "--main-mode", default='', nargs='?', help="Main mode. 1:Scraping 2:Organizing 3:Scraping in analysis folder") parser.add_argument("-n", "--number", default='', nargs='?', help="Custom file number of single movie file.") # parser.add_argument("-C", "--config", default='config.ini', nargs='?', help="The config file Path.") parser.add_argument("-L", "--link-mode", default='', nargs='?', help="Create movie file link. 0:moving movie file, do not create link 1:soft link 2:try hard link first") default_logdir = str(Path.home() / '.mlogs') parser.add_argument("-o", "--log-dir", dest='logdir', default=default_logdir, nargs='?', help=f"""Duplicate stdout and stderr to logfiles in logging folder, default on. default folder for current user: '{default_logdir}'. Change default folder to an empty file, or use --log-dir= to turn log off.""") parser.add_argument("-q", "--regex-query", dest='regexstr', default='', nargs='?', help="python re module regex filepath filtering.") parser.add_argument("-d", "--nfo-skip-days", dest='days', default='', nargs='?', help="Override nfo_skip_days value in config.") parser.add_argument("-c", "--stop-counter", dest='cnt', default='', nargs='?', help="Override stop_counter value in config.") parser.add_argument("-R", "--rerun-delay", dest='delaytm', default='', nargs='?', help="Delay (eg. 1h10m30s or 60 (second)) time and rerun, until all movies proceed. 
Note: stop_counter value in config or -c must none zero.") parser.add_argument("-i", "--ignore-failed-list", action="store_true", help="Ignore failed list '{}'".format( os.path.join(os.path.abspath(conf.failed_folder()), 'failed_list.txt'))) parser.add_argument("-a", "--auto-exit", action="store_true", help="Auto exit after program complete") parser.add_argument("-g", "--debug", action="store_true", help="Turn on debug mode to generate diagnostic log for issue report.") parser.add_argument("-N", "--no-network-operation", action="store_true", help="No network query, do not get metadata, for cover cropping purposes, only takes effect when main mode is 3.") parser.add_argument("-w", "--website", dest='site', default='', nargs='?', help="Override [priority]website= in config.") parser.add_argument("-D", "--download-images", dest='dnimg', action="store_true", help="Override [common]download_only_missing_images=0 force invoke image downloading.") parser.add_argument("-C", "--config-override", dest='cfgcmd', action='append', nargs=1, help="Common use config override. Grammar: section:key=value[;[section:]key=value] eg. 'de:s=1' or 'debug_mode:switch=1' override[debug_mode]switch=1 Note:this parameters can be used multiple times") parser.add_argument("-z", "--zero-operation", dest='zero_op', action="store_true", help="""Only show job list of files and numbers, and **NO** actual operation is performed. It may help you correct wrong numbers before real job.""") parser.add_argument("-v", "--version", action="version", version=ver) parser.add_argument("-s", "--search", default='', nargs='?', help="Search number") parser.add_argument("-ss", "--specified-source", default='', nargs='?', help="specified Source.") parser.add_argument("-su", "--specified-url", default='', nargs='?', help="specified Url.") args = parser.parse_args() def set_natural_number_or_none(sk, value): if isinstance(value, str) and value.isnumeric() and int(value) >= 0: conf.set_override(f'{sk}={value}') def set_str_or_none(sk, value): if isinstance(value, str) and len(value): conf.set_override(f'{sk}={value}') def set_bool_or_none(sk, value): if isinstance(value, bool) and value: conf.set_override(f'{sk}=1') set_natural_number_or_none("common:main_mode", args.main_mode) set_natural_number_or_none("common:link_mode", args.link_mode) set_str_or_none("common:source_folder", args.path) set_bool_or_none("common:auto_exit", args.auto_exit) set_natural_number_or_none("common:nfo_skip_days", args.days) set_natural_number_or_none("advenced_sleep:stop_counter", args.cnt) set_bool_or_none("common:ignore_failed_list", args.ignore_failed_list) set_str_or_none("advenced_sleep:rerun_delay", args.delaytm) set_str_or_none("priority:website", args.site) if isinstance(args.dnimg, bool) and args.dnimg: conf.set_override("common:download_only_missing_images=0") set_bool_or_none("debug_mode:switch", args.debug) if isinstance(args.cfgcmd, list): for cmd in args.cfgcmd: conf.set_override(cmd[0]) no_net_op = False if conf.main_mode() == 3: no_net_op = args.no_network_operation if no_net_op: conf.set_override("advenced_sleep:stop_counter=0;advenced_sleep:rerun_delay=0s;face:aways_imagecut=1") return args.file, args.number, args.logdir, args.regexstr, args.zero_op, no_net_op, args.search, args.specified_source, args.specified_url class OutLogger(object): def __init__(self, logfile) -> None: self.term = sys.stdout self.log = open(logfile, "w", encoding='utf-8', buffering=1) self.filepath = logfile def __del__(self): self.close() def __enter__(self): pass def 
__exit__(self, *args): self.close() def write(self, msg): self.term.write(msg) self.log.write(msg) def flush(self): if 'flush' in dir(self.term): self.term.flush() if 'flush' in dir(self.log): self.log.flush() if 'fileno' in dir(self.log): os.fsync(self.log.fileno()) def close(self): if self.term is not None: sys.stdout = self.term self.term = None if self.log is not None: self.log.close() self.log = None class ErrLogger(OutLogger): def __init__(self, logfile) -> None: self.term = sys.stderr self.log = open(logfile, "w", encoding='utf-8', buffering=1) self.filepath = logfile def close(self): if self.term is not None: sys.stderr = self.term self.term = None if self.log is not None: self.log.close() self.log = None def dupe_stdout_to_logfile(logdir: str): """ Duplicates the standard output (stdout) and standard error (stderr) to log files. This function creates log files in the specified directory and redirects stdout and stderr to these files for logging purposes. :param logdir: The directory where log files will be created and saved. """ if not isinstance(logdir, str) or len(logdir) == 0: return log_dir = Path(logdir) if not log_dir.exists(): try: log_dir.mkdir(parents=True, exist_ok=True) except: pass if not log_dir.is_dir(): return # Tips for disabling logs by change directory to a same name empty regular file abslog_dir = log_dir.resolve() log_tmstr = datetime.now().strftime("%Y%m%dT%H%M%S") logfile = abslog_dir / f'mdc_{log_tmstr}.txt' errlog = abslog_dir / f'mdc_{log_tmstr}_err.txt' sys.stdout = OutLogger(logfile) sys.stderr = ErrLogger(errlog) def close_logfile(logdir: str): """ Closes the log files and restores standard output and error streams. This function is typically called at the end of the application to ensure that log files are properly closed. :param logdir: The directory where log files are saved. 
""" if not isinstance(logdir, str) or len(logdir) == 0 or not os.path.isdir(logdir): return # 日志关闭前保存日志路径 filepath = None try: filepath = sys.stdout.filepath except: pass sys.stdout.close() sys.stderr.close() log_dir = Path(logdir).resolve() if isinstance(filepath, Path): print(f"Log file '{filepath}' saved.") assert (filepath.parent.samefile(log_dir)) # 清理空文件 for f in log_dir.glob(r'*_err.txt'): if f.stat().st_size == 0: try: f.unlink(missing_ok=True) except: pass # 合并日志 只检测日志目录内的文本日志,忽略子目录。三天前的日志,按日合并为单个日志,三个月前的日志, # 按月合并为单个月志,去年及以前的月志,今年4月以后将之按年合并为年志 # 测试步骤: """ LOGDIR=/tmp/mlog mkdir -p $LOGDIR for f in {2016..2020}{01..12}{01..28};do;echo $f>$LOGDIR/mdc_${f}T235959.txt;done for f in {01..09}{01..28};do;echo 2021$f>$LOGDIR/mdc_2021${f}T235959.txt;done for f in {00..23};do;echo 20211001T$f>$LOGDIR/mdc_20211001T${f}5959.txt;done echo "$(ls -1 $LOGDIR|wc -l) files in $LOGDIR" # 1932 files in /tmp/mlog mdc -zgic1 -d0 -m3 -o $LOGDIR # python3 ./Movie_Data_Capture.py -zgic1 -o $LOGDIR ls $LOGDIR # rm -rf $LOGDIR """ today = datetime.today() # 第一步,合并到日。3天前的日志,文件名是同一天的合并为一份日志 for i in range(1): txts = [f for f in log_dir.glob(r'*.txt') if re.match(r'^mdc_\d{8}T\d{6}$', f.stem, re.A)] if not txts or not len(txts): break e = [f for f in txts if '_err' in f.stem] txts.sort() tmstr_3_days_ago = (today.replace(hour=0) - timedelta(days=3)).strftime("%Y%m%dT99") deadline_day = f'mdc_{tmstr_3_days_ago}' day_merge = [f for f in txts if f.stem < deadline_day] if not day_merge or not len(day_merge): break cutday = len('T235959.txt') # cut length mdc_20201201|T235959.txt for f in day_merge: try: day_file_name = str(f)[:-cutday] + '.txt' # mdc_20201201.txt with open(day_file_name, 'a', encoding='utf-8') as m: m.write(f.read_text(encoding='utf-8')) f.unlink(missing_ok=True) except: pass # 第二步,合并到月 for i in range(1): # 利用1次循环的break跳到第二步,避免大块if缩进或者使用goto语法 txts = [f for f in log_dir.glob(r'*.txt') if re.match(r'^mdc_\d{8}$', f.stem, re.A)] if not txts or not len(txts): break txts.sort() tmstr_3_month_ago = (today.replace(day=1) - timedelta(days=3 * 30)).strftime("%Y%m32") deadline_month = f'mdc_{tmstr_3_month_ago}' month_merge = [f for f in txts if f.stem < deadline_month] if not month_merge or not len(month_merge): break tomonth = len('01.txt') # cut length mdc_202012|01.txt for f in month_merge: try: month_file_name = str(f)[:-tomonth] + '.txt' # mdc_202012.txt with open(month_file_name, 'a', encoding='utf-8') as m: m.write(f.read_text(encoding='utf-8')) f.unlink(missing_ok=True) except: pass # 第三步,月合并到年 for i in range(1): if today.month < 4: break mons = [f for f in log_dir.glob(r'*.txt') if re.match(r'^mdc_\d{6}$', f.stem, re.A)] if not mons or not len(mons): break mons.sort() deadline_year = f'mdc_{today.year - 1}13' year_merge = [f for f in mons if f.stem < deadline_year] if not year_merge or not len(year_merge): break toyear = len('12.txt') # cut length mdc_2020|12.txt for f in year_merge: try: year_file_name = str(f)[:-toyear] + '.txt' # mdc_2020.txt with open(year_file_name, 'a', encoding='utf-8') as y: y.write(f.read_text(encoding='utf-8')) f.unlink(missing_ok=True) except: pass # 第四步,压缩年志 如果有压缩需求,请自行手工压缩,或者使用外部脚本来定时完成。推荐nongnu的lzip,对于 # 这种粒度的文本日志,压缩比是目前最好的。lzip -9的运行参数下,日志压缩比要高于xz -9,而且内存占用更少, # 多核利用率更高(plzip多线程版本),解压速度更快。压缩后的大小差不多是未压缩时的2.4%到3.7%左右, # 100MB的日志文件能缩小到3.7MB。 return filepath def signal_handler(*args): """ A signal handler function for handling operating system signals like Ctrl+C (SIGINT). 
It defines the behavior of the application when such signals are received, such as graceful termination. :param args: Variable argument list, used to handle signal information. """ print('[!]Ctrl+C detected, Exit.') os._exit(9) def sigdebug_handler(*args): """ A signal handler function specifically for toggling debug mode on or off. It alters the debug configuration based on certain system signals (like window size change in Unix systems). :param args: Variable argument list, used to handle signal information. """ conf = config.getInstance() conf.set_override(f"debug_mode:switch={int(not conf.debug())}") print(f"[!]Debug {('oFF', 'On')[int(conf.debug())]}") # Added: skip handling via the failed-file list and via .nfo modification age, report the total number of skipped videos, list skipped files in detail in debug mode (-g), and skip small ad clips def movie_lists(source_folder, regexstr: str) -> typing.List[str]: """ Generates a list of movie file paths from the specified source folder. It filters files based on regular expressions and other criteria, such as file type and size. :param source_folder: The folder to scan for movie files. :param regexstr: A regular expression string to filter movie files. :return: A list of paths to the movie files that match the criteria. """ conf = config.getInstance() main_mode = conf.main_mode() debug = conf.debug() nfo_skip_days = conf.nfo_skip_days() link_mode = conf.link_mode() file_type = conf.media_type().lower().split(",") trailerRE = re.compile(r'-trailer\.', re.IGNORECASE) cliRE = None if isinstance(regexstr, str) and len(regexstr): try: cliRE = re.compile(regexstr, re.IGNORECASE) except: pass failed_list_txt_path = Path(conf.failed_folder()).resolve() / 'failed_list.txt' failed_set = set() if (main_mode == 3 or link_mode) and not conf.ignore_failed_list(): try: flist = failed_list_txt_path.read_text(encoding='utf-8').splitlines() failed_set = set(flist) if len(flist) != len(failed_set): # Deduplicate and write back without changing the order of entries in failed_list.txt; for duplicates only the last occurrence is kept fset = failed_set.copy() for i in range(len(flist) - 1, -1, -1): fset.remove(flist[i]) if flist[i] in fset else flist.pop(i) failed_list_txt_path.write_text('\n'.join(flist) + '\n', encoding='utf-8') assert len(fset) == 0 and len(flist) == len(failed_set) except: pass if not Path(source_folder).is_dir(): print('[-]Source folder not found!') return [] total = [] # source = Path(source_folder).resolve() source = Path(source_folder) skip_failed_cnt, skip_nfo_days_cnt = 0, 0 escape_folder_set = set(re.split("[,,]", conf.escape_folder())) for full_name in source.glob(r'**/*'): if main_mode != 3 and set(full_name.parent.parts) & escape_folder_set: continue if not full_name.suffix.lower() in file_type: continue absf = str(full_name) if absf in failed_set: skip_failed_cnt += 1 if debug: print('[!]Skip failed movie:', absf) continue is_sym = full_name.is_symlink() if main_mode != 3 and (is_sym or (full_name.stat().st_nlink > 1 and not conf.scan_hardlink())): # short-circuit boolean: never stat() a symlink, since it may point to a nonexistent target continue # outside mode 3, skip symlinks, and skip hard links unless hard-link scanning is configured # allow 0-byte debug samples through; drop ads smaller than 120MB, e.g. '苍老师强力推荐.mp4' (102.2MB), '黑道总裁.mp4' (98.4MB), '有趣的妹子激情表演.MP4' (95MB), '有趣的臺灣妹妹直播.mp4' (15.1MB) movie_size = 0 if is_sym else full_name.stat().st_size # as above, no stat()/st_size on symlinks; assign 0 directly so the small-video check is skipped # if 0 < movie_size < 125829120: # 1024*1024*120=125829120 # continue if cliRE and not cliRE.search(absf) or trailerRE.search(full_name.name): continue if main_mode == 3: nfo = full_name.with_suffix('.nfo') if not nfo.is_file(): if debug: print(f"[!]Metadata {nfo.name} not found for '{absf}'") elif nfo_skip_days > 0 and file_modification_days(nfo) <= nfo_skip_days: skip_nfo_days_cnt += 1 if debug: print(f"[!]Skip movie by its .nfo
which was modified within {nfo_skip_days} days: '{absf}'") continue total.append(absf) if skip_failed_cnt: print(f"[!]Skip {skip_failed_cnt} movies in failed list '{failed_list_txt_path}'.") if skip_nfo_days_cnt: print( f"[!]Skip {skip_nfo_days_cnt} movies in source folder '{source}' whose .nfo was modified within {nfo_skip_days} days.") if nfo_skip_days <= 0 or not link_mode or main_mode == 3: return total # In symlink mode, titles already scraped successfully must also have their .nfo age checked in the success folder; skip those updated within N days skip_numbers = set() success_folder = Path(conf.success_folder()).resolve() for f in success_folder.glob(r'**/*'): if not re.match(r'\.nfo$', f.suffix, re.IGNORECASE): continue if file_modification_days(f) > nfo_skip_days: continue number = get_number(False, f.stem) if not number: continue skip_numbers.add(number.lower()) rm_list = [] for f in total: n_number = get_number(False, os.path.basename(f)) if n_number and n_number.lower() in skip_numbers: rm_list.append(f) for f in rm_list: total.remove(f) if debug: print(f"[!]Skip file successfully processed within {nfo_skip_days} days: '{f}'") if len(rm_list): print( f"[!]Skip {len(rm_list)} movies in success folder '{success_folder}' whose .nfo was modified within {nfo_skip_days} days.") return total def create_failed_folder(failed_folder: str): """ Create the failed folder if it does not already exist. """ if not os.path.exists(failed_folder): try: os.makedirs(failed_folder) except: print(f"[-]Fatal error! Cannot make folder '{failed_folder}'") os._exit(0) def rm_empty_folder(path): """ Recursively removes empty folders from a given path. This function is useful for cleaning up the directory structure by removing folders that no longer contain any files. :param path: The path where empty folders will be searched for and removed. """ abspath = os.path.abspath(path) deleted = set() for current_dir, subdirs, files in os.walk(abspath, topdown=False): try: still_has_subdirs = any(os.path.join(current_dir, subdir) not in deleted for subdir in subdirs) if not any(files) and not still_has_subdirs and not os.path.samefile(path, current_dir): os.rmdir(current_dir) deleted.add(current_dir) print('[+]Deleting empty folder', current_dir) except: pass def create_data_and_move(movie_path: str, zero_op: bool, no_net_op: bool, oCC): """ Processes a movie file, generates necessary data, and moves the file to an appropriate directory based on the outcome. This function is central to the application's file processing logic, including scraping, organizing, and error handling. :param movie_path: Path of the movie file to be processed. :param zero_op: A boolean flag indicating whether to perform a dry run (no actual file operations). :param no_net_op: A boolean flag to indicate whether network operations are to be skipped. :param oCC: An OpenCC instance for language conversion, if required. """ # Normalized number, eg: 111xxx-222.mp4 -> xxx-222.mp4 skip_file_names = config.getInstance().skip_file_names() debug = config.getInstance().debug() n_number = get_number(debug, os.path.basename(movie_path)) movie_path = os.path.abspath(movie_path) # print(movie_path) for skip_name in skip_file_names: if skip_name in movie_path: print('[+]Skipping file:{}'.format(movie_path)) return if debug is True: print(f"[!] [{n_number}] As Number Processing for '{movie_path}'") if zero_op: return if n_number: if no_net_op: core_main_no_net_op(movie_path, n_number) else:
core_main(movie_path, n_number, oCC)
5
2023-11-25 03:16:13+00:00
16k
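Editor's note on the record above: its logging code tees sys.stdout/sys.stderr through wrapper classes that write to both the terminal and a file. Below is a minimal, self-contained sketch of that same tee pattern; the Tee class name and the run.txt path are illustrative, not part of the record.

import sys

class Tee:
    # Write every message to both the original stream and a log file,
    # mirroring the OutLogger/ErrLogger pattern in the record above.
    def __init__(self, logfile: str):
        self.term = sys.stdout
        self.log = open(logfile, "w", encoding="utf-8", buffering=1)
    def write(self, msg):
        self.term.write(msg)
        self.log.write(msg)
    def flush(self):
        self.term.flush()
        self.log.flush()
    def close(self):
        sys.stdout = self.term  # restore the original stream
        self.log.close()

sys.stdout = Tee("run.txt")  # from here on, prints go to terminal and file
print("hello")
sys.stdout.close()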
abdulhaim/LMRL-Gym
llm_rl_scripts/maze/mc_returns/train_mc.py
[ { "identifier": "Text", "path": "LLM_RL/environment.py", "snippet": "class Text:\nclass TextTrajectory:\nclass TextTrajectoryChain:\nclass TextEnv(ABC):\nclass BatchedTextEnv(ABC):\nclass TextEnvToBatchedTextEnv(BatchedTextEnv):\nclass BatchedTextEnvToTextEnv(TextEnv):\nclass TextPolicy(ABC):\nclass Bat...
from typing import Optional, Dict, Any from JaxSeq.bucket_manager import open_with_bucket as open from JaxSeq.utils import convert_path, load_mesh, setup_experiment_save from JaxSeq.utils import BlockingStrategy, Padding, Truncation, get_weight_decay_mask from JaxSeq.models.gpt2.load import load_train_state, ModelLoadMode from transformers.generation import GenerationConfig from jaxtyping import PyTree from LLM_RL.environment import Text, text_env_eval, TextTrajectory, TextTrajectoryChain, TokenTrajectoryChain, text_history_to_str from LLM_RL.algorithms.mc_returns.data import MCData, MCDataset from LLM_RL.algorithms.value_rl_base.gpt2.interface import GPT2ValuePolicy from LLM_RL.heads.mlp_head import load_train_state_from_config as load_head_train_state_from_config from LLM_RL.heads.mlp_head import MLPHeadConfig from LLM_RL.algorithms.mc_returns.gpt2.interface import GPT2MCTrain, GPT2MCInference from functools import partial from JaxSeq.logs import log, pull_logs from transformers import GPT2TokenizerFast from IPython import embed from llm_rl_scripts.maze.env.maze_utils import setup_maze_env, pick_start_position from llm_rl_scripts.maze.env.mazes import double_t_maze_optimal_directions, double_t_maze from llm_rl_scripts.maze.env.env import describe_observation_give_position, maze_proposal_function from LLM_RL.algorithms.ppo.reranker_policy import ReRankerPolicy, ReRankerSamplePolicy from JaxSeq.shard_model import copy_sharded_pytree from LLM_RL.algorithms.mc_returns.base_interface import mc_loss from LLM_RL.algorithms.mc_returns.train import train_loop from LLM_RL.algorithms.mc_returns.data import MCData, MCDataset from LLM_RL.algorithms.mc_returns.score_fn import build_mc_score_fn import tyro import jax import jax.numpy as jnp import os import optax import pickle as pkl import re import numpy as np import json import random
14,175
def main( model_load_mode: ModelLoadMode, model_load_path: str, train_data_path: str, /, # Mark the end of positional arguments. exp_name: Optional[str]=None, outputs_path: Optional[str]=None, data_mesh_shape: int=1, fsdp_mesh_shape: int=1, model_mesh_shape: int=-1, use_wandb: bool=True, wandb_project: Optional[str]="llm_rl_repo_give_position_ilql", n_rounds: int=1, epochs: int=1, max_steps: Optional[int]=None, lr: float=1e-4, weight_decay: float=0.0, tau: float=0.95, cql_weight: float=0.0, gamma: float=0.99, train_bsize: int=32, grad_accum_steps: int=1, gradient_checkpointing: bool=False, gradient_checkpointing_policy: str='nothing_saveable', max_length: int=80, log_every: int=256, eval_every_steps: Optional[int]=10000, eval_every_epochs: Optional[int]=None, eval_at_beginning: bool=True, eval_at_end: bool=True, save_every_steps: Optional[int]=100000, save_every_epochs: Optional[int]=None, save_at_beginning: bool=True, save_at_end: bool=True, save_best: bool=False, max_checkpoints: Optional[int]=None, save_train_state: bool=True, save_bf16: bool=True, policy_max_input_length: int=256, policy_max_output_length: int=256, policy_do_sample: bool=True, policy_num_beams: int=1, policy_temperature: Optional[float]=None, policy_top_p: Optional[float]=None, policy_top_k: Optional[int]=None, force_pad_embeddings: bool=False, should_restore_loop_state: bool=False, reranker: bool=False, ): input_args = locals() print(input_args) tokenizer = GPT2TokenizerFast.from_pretrained('gpt2') tokenizer.add_special_tokens({'pad_token': '<|pad|>'}) mesh = load_mesh((data_mesh_shape, fsdp_mesh_shape, model_mesh_shape), ('dp', 'fsdp', 'mp')) is_main_process = jax.process_index() == 0 print(f"Mesh: {mesh}") print(f"Is main process: {is_main_process}") def mc_data_generator(data_name): with open(data_name, "r") as f: for item in f: obj = json.loads(item) # curr_chain = TextTrajectory() # starting with the last element
def main( model_load_mode: ModelLoadMode, model_load_path: str, train_data_path: str, /, # Mark the end of positional arguments. exp_name: Optional[str]=None, outputs_path: Optional[str]=None, data_mesh_shape: int=1, fsdp_mesh_shape: int=1, model_mesh_shape: int=-1, use_wandb: bool=True, wandb_project: Optional[str]="llm_rl_repo_give_position_ilql", n_rounds: int=1, epochs: int=1, max_steps: Optional[int]=None, lr: float=1e-4, weight_decay: float=0.0, tau: float=0.95, cql_weight: float=0.0, gamma: float=0.99, train_bsize: int=32, grad_accum_steps: int=1, gradient_checkpointing: bool=False, gradient_checkpointing_policy: str='nothing_saveable', max_length: int=80, log_every: int=256, eval_every_steps: Optional[int]=10000, eval_every_epochs: Optional[int]=None, eval_at_beginning: bool=True, eval_at_end: bool=True, save_every_steps: Optional[int]=100000, save_every_epochs: Optional[int]=None, save_at_beginning: bool=True, save_at_end: bool=True, save_best: bool=False, max_checkpoints: Optional[int]=None, save_train_state: bool=True, save_bf16: bool=True, policy_max_input_length: int=256, policy_max_output_length: int=256, policy_do_sample: bool=True, policy_num_beams: int=1, policy_temperature: Optional[float]=None, policy_top_p: Optional[float]=None, policy_top_k: Optional[int]=None, force_pad_embeddings: bool=False, should_restore_loop_state: bool=False, reranker: bool=False, ): input_args = locals() print(input_args) tokenizer = GPT2TokenizerFast.from_pretrained('gpt2') tokenizer.add_special_tokens({'pad_token': '<|pad|>'}) mesh = load_mesh((data_mesh_shape, fsdp_mesh_shape, model_mesh_shape), ('dp', 'fsdp', 'mp')) is_main_process = jax.process_index() == 0 print(f"Mesh: {mesh}") print(f"Is main process: {is_main_process}") def mc_data_generator(data_name): with open(data_name, "r") as f: for item in f: obj = json.loads(item) # curr_chain = TextTrajectory() # starting with the last element
last_trajectory = TextTrajectory([Text(obj[-1]["state"], False), Text(obj[-1]["action"], True)],
0
2023-11-21 00:16:42+00:00
16k
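Editor's note on the record above: mc_data_generator consumes a JSONL file, one JSON object per line, and (per the record's next_line) builds each trajectory chain starting from the object's last step. A minimal sketch of that line-by-line parse; the per-step keys "state" and "action" come from the visible code, while the train.jsonl file name is a hypothetical placeholder.

import json

def read_jsonl(path):
    # Yield one parsed object per line, as mc_data_generator does above.
    with open(path, "r") as f:
        for line in f:
            yield json.loads(line)

for obj in read_jsonl("train.jsonl"):        # hypothetical file name
    last = obj[-1]                           # the chain is built from the last step
    state, action = last["state"], last["action"]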
jzmzhong/Automatic-Prosody-Annotator-with-SSWP-CLAP
src/clap_module/conformer/encoder.py
[ { "identifier": "ConvolutionModule", "path": "src/clap_module/conformer/convolution.py", "snippet": "class ConvolutionModule(nn.Module):\r\n \"\"\"ConvolutionModule in Conformer model.\r\n\r\n Args:\r\n channels (int): The number of channels of conv layers.\r\n kernel_size (int): Ker...
import logging import torch import math from .convolution import ConvolutionModule from .encoder_layer import EncoderLayer from .modules import get_activation from .modules import VGG2L from .modules import ( LegacyRelPositionMultiHeadedAttention, MultiHeadedAttention, RelPositionMultiHeadedAttention, ) from .embedding import ( LegacyRelPositionalEncoding, PositionalEncoding, RelPositionalEncoding, ScaledPositionalEncoding, ) from .modules import LayerNorm from .multi_layer_conv import ( Conv1dLinear, MultiLayeredConv1d, ) from .modules import ( PositionwiseFeedForward, ) from .modules import repeat from .sub_sampling import Conv2dSubsampling from ..feature_fusion import AttentionPool1d, DAF, AFF, iAFF
14,369
# Copyright 2020 Johns Hopkins University (Shinji Watanabe) # Northwestern Polytechnical University (Pengcheng Guo) # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) """Encoder definition.""" class Encoder(torch.nn.Module): """Conformer encoder module. Args: idim (int): Input dimension. attention_dim (int): Dimension of attention. attention_heads (int): The number of heads of multi-head attention. linear_units (int): The number of units of position-wise feed forward. num_blocks (int): The number of encoder blocks. dropout_rate (float): Dropout rate. positional_dropout_rate (float): Dropout rate after adding positional encoding. attention_dropout_rate (float): Dropout rate in attention. input_layer (Union[str, torch.nn.Module]): Input layer type. normalize_before (bool): Whether to use layer_norm before the first block. concat_after (bool): Whether to concat attention layer's input and output. if True, additional linear will be applied. i.e. x -> x + linear(concat(x, att(x))) if False, no additional linear will be applied. i.e. x -> x + att(x) positionwise_layer_type (str): "linear", "conv1d", or "conv1d-linear". positionwise_conv_kernel_size (int): Kernel size of positionwise conv1d layer. macaron_style (bool): Whether to use macaron style for positionwise layer. pos_enc_layer_type (str): Encoder positional encoding layer type. selfattention_layer_type (str): Encoder attention layer type. activation_type (str): Encoder activation function type. use_cnn_module (bool): Whether to use convolution module. zero_triu (bool): Whether to zero the upper triangular part of attention matrix. cnn_module_kernel (int): Kernel size of convolution module. padding_idx (int): Padding idx for input_layer=embed. stochastic_depth_rate (float): Maximum probability to skip the encoder layer. intermediate_layers (Union[List[int], None]): indices of intermediate CTC layer. indices start from 1. if not None, intermediate outputs are returned (which changes the return type signature).
""" def __init__( self, idim, attention_dim=256, attention_heads=4, linear_units=2048, num_blocks=6, dropout_rate=0.1, positional_dropout_rate=0.1, attention_dropout_rate=0.0, input_layer="conv2d", normalize_before=True, concat_after=False, ffn_layer_type="linear", ffn_conv_kernel_size=1, macaron_style=False, pos_enc_layer_type="abs_pos", selfattention_layer_type="selfattn", activation_type="relu", use_cnn_module=True, zero_triu=False, cnn_module_kernel=31, padding_idx=-1, stochastic_depth_rate=0.0, intermediate_layers=None, ctc_softmax=None, conditioning_layer_dim=None, max_seq_len=100, enable_fusion=False, fusion_type="", ): """Construct an Encoder object.""" super(Encoder, self).__init__() self.max_seq_len = max_seq_len activation = get_activation(activation_type) if pos_enc_layer_type == "abs_pos": pos_enc_class = PositionalEncoding elif pos_enc_layer_type == "scaled_abs_pos": pos_enc_class = ScaledPositionalEncoding elif pos_enc_layer_type == "rel_pos": assert selfattention_layer_type == "rel_selfattn" pos_enc_class = RelPositionalEncoding elif pos_enc_layer_type == "legacy_rel_pos": assert selfattention_layer_type == "legacy_rel_selfattn" pos_enc_class = LegacyRelPositionalEncoding else: raise ValueError("unknown pos_enc_layer: " + pos_enc_layer_type) self.conv_subsampling_factor = 1 if input_layer == "linear": self.embed = torch.nn.Sequential( torch.nn.Linear(idim, attention_dim), torch.nn.LayerNorm(attention_dim), torch.nn.Dropout(dropout_rate), pos_enc_class(attention_dim, positional_dropout_rate), ) elif input_layer == "conv2d": self.embed = Conv2dSubsampling( idim, attention_dim, dropout_rate, pos_enc_class(attention_dim, positional_dropout_rate), ) self.conv_subsampling_factor = 4 elif input_layer == "vgg2l":
# Copyright 2020 Johns Hopkins University (Shinji Watanabe) # Northwestern Polytechnical University (Pengcheng Guo) # Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) """Encoder definition.""" class Encoder(torch.nn.Module): """Conformer encoder module. Args: idim (int): Input dimension. attention_dim (int): Dimension of attention. attention_heads (int): The number of heads of multi-head attention. linear_units (int): The number of units of position-wise feed forward. num_blocks (int): The number of encoder blocks. dropout_rate (float): Dropout rate. positional_dropout_rate (float): Dropout rate after adding positional encoding. attention_dropout_rate (float): Dropout rate in attention. input_layer (Union[str, torch.nn.Module]): Input layer type. normalize_before (bool): Whether to use layer_norm before the first block. concat_after (bool): Whether to concat attention layer's input and output. if True, additional linear will be applied. i.e. x -> x + linear(concat(x, att(x))) if False, no additional linear will be applied. i.e. x -> x + att(x) positionwise_layer_type (str): "linear", "conv1d", or "conv1d-linear". positionwise_conv_kernel_size (int): Kernel size of positionwise conv1d layer. macaron_style (bool): Whether to use macaron style for positionwise layer. pos_enc_layer_type (str): Encoder positional encoding layer type. selfattention_layer_type (str): Encoder attention layer type. activation_type (str): Encoder activation function type. use_cnn_module (bool): Whether to use convolution module. zero_triu (bool): Whether to zero the upper triangular part of attention matrix. cnn_module_kernel (int): Kernel size of convolution module. padding_idx (int): Padding idx for input_layer=embed. stochastic_depth_rate (float): Maximum probability to skip the encoder layer. intermediate_layers (Union[List[int], None]): indices of intermediate CTC layer. indices start from 1. if not None, intermediate outputs are returned (which changes the return type signature).
""" def __init__( self, idim, attention_dim=256, attention_heads=4, linear_units=2048, num_blocks=6, dropout_rate=0.1, positional_dropout_rate=0.1, attention_dropout_rate=0.0, input_layer="conv2d", normalize_before=True, concat_after=False, ffn_layer_type="linear", ffn_conv_kernel_size=1, macaron_style=False, pos_enc_layer_type="abs_pos", selfattention_layer_type="selfattn", activation_type="relu", use_cnn_module=True, zero_triu=False, cnn_module_kernel=31, padding_idx=-1, stochastic_depth_rate=0.0, intermediate_layers=None, ctc_softmax=None, conditioning_layer_dim=None, max_seq_len=100, enable_fusion=False, fusion_type="", ): """Construct an Encoder object.""" super(Encoder, self).__init__() self.max_seq_len = max_seq_len activation = get_activation(activation_type) if pos_enc_layer_type == "abs_pos": pos_enc_class = PositionalEncoding elif pos_enc_layer_type == "scaled_abs_pos": pos_enc_class = ScaledPositionalEncoding elif pos_enc_layer_type == "rel_pos": assert selfattention_layer_type == "rel_selfattn" pos_enc_class = RelPositionalEncoding elif pos_enc_layer_type == "legacy_rel_pos": assert selfattention_layer_type == "legacy_rel_selfattn" pos_enc_class = LegacyRelPositionalEncoding else: raise ValueError("unknown pos_enc_layer: " + pos_enc_layer_type) self.conv_subsampling_factor = 1 if input_layer == "linear": self.embed = torch.nn.Sequential( torch.nn.Linear(idim, attention_dim), torch.nn.LayerNorm(attention_dim), torch.nn.Dropout(dropout_rate), pos_enc_class(attention_dim, positional_dropout_rate), ) elif input_layer == "conv2d": self.embed = Conv2dSubsampling( idim, attention_dim, dropout_rate, pos_enc_class(attention_dim, positional_dropout_rate), ) self.conv_subsampling_factor = 4 elif input_layer == "vgg2l":
self.embed = VGG2L(idim, attention_dim)
3
2023-11-25 02:38:32+00:00
16k
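Editor's note on the record above: a minimal usage sketch for its Encoder, assuming the module imports resolve. The idim=80 value is an illustrative choice, and the forward signature (features, masks) -> (features, masks) follows the ESPnet conformer convention this code derives from; both are assumptions, not values taken from the record.

import torch

enc = Encoder(idim=80, attention_dim=256, attention_heads=4, num_blocks=6,
              input_layer="conv2d", pos_enc_layer_type="rel_pos",
              selfattention_layer_type="rel_selfattn",
              use_cnn_module=True, cnn_module_kernel=31)
feats = torch.randn(2, 100, 80)        # (batch, frames, idim) filterbank features
masks = torch.ones(2, 1, 100).bool()   # True at non-padded frames
out, out_masks = enc(feats, masks)     # the conv2d front-end subsamples frames 4x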
facebookresearch/ExPLORe
train_finetuning_pixels.py
[ { "identifier": "DrQLearner", "path": "rlpd/agents/drq/drq_learner.py", "snippet": "class DrQLearner(SACLearner):\n data_augmentation_fn: Callable = struct.field(pytree_node=False)\n\n @classmethod\n def create(\n cls,\n seed: int,\n observation_space: gym.Space,\n a...
import os import numpy as np import tqdm import wandb import matplotlib.pyplot as plt import pickle import roboverse import types import jax import jax.numpy as jnp from absl import app, flags from flax.core import FrozenDict from ml_collections import config_flags from flax.core import frozen_dict from flax.training import checkpoints from rlpd.agents import DrQLearner, PixelRND, PixelRM, PixelBCAgent from rlpd.data import MemoryEfficientReplayBuffer, ReplayBuffer from rlpd.evaluation import evaluate from rlpd.wrappers import wrap_pixels from rlpd.agents.drq.icvf import PixelICVF from rlpd import gc_dataset from gym.wrappers import TimeLimit, FilterObservation, RecordEpisodeStatistics from rlpd.data import Dataset from rlpd.data.cog_datasets import COGDataset from functools import partial
13,776
for k, v in info["episode"].items(): decode = {"r": "return", "l": "length", "t": "time"} wandb.log({f"episode/{decode[k]}": v}, step=record_step) if FLAGS.bc_pretrain_rollin > 0.0: curr_rng, rng = jax.random.split(rng) rollin_enabled = ( True if jax.random.uniform(key=curr_rng) < FLAGS.bc_pretrain_rollin else False ) # main updates if i >= FLAGS.start_training: online_batch = next(replay_buffer_iterator) if i >= FLAGS.start_training * 2: # update the reward model on the online batch if rm is not None: rm, rm_update_info = rm.update(online_batch, FLAGS.utd_ratio) logging_info.update(add_prefix("rm/", rm_update_info)) if rnd is not None: rnd, rnd_update_info = rnd.update( frozen_dict.freeze( { "observations": { k: ob[None] for k, ob in observation.items() }, "actions": action[None], "next_observations": { k: ob[None] for k, ob in next_observation.items() }, "rewards": np.array(reward)[None], "masks": np.array(mask)[None], "dones": np.array(done)[None], } ) ) logging_info.update(add_prefix("rnd/", rnd_update_info)) # prepare the batch for the main agent online_replace = {"bc_masks": jnp.ones_like(online_batch["masks"])} if FLAGS.use_rnd_online: online_replace["rewards"] = online_batch["rewards"] + rnd.get_reward( frozen_dict.freeze(online_batch) ) online_batch = online_batch.copy(add_or_replace=online_replace) if FLAGS.offline_ratio > 0: offline_batch = next(ds_iterator) offline_replace = { "bc_masks": jnp.ones_like(offline_batch["masks"]), "rewards": offline_batch["rewards"], } if FLAGS.offline_relabel_type in ["pred", "min"]: offline_replace["masks"] = rm.get_mask(offline_batch) if FLAGS.offline_relabel_type == "min": offline_replace["rewards"] = ( offline_batch["rewards"].at[:].set(ds_minr) ) if FLAGS.offline_relabel_type == "pred": offline_replace["rewards"] = rm.get_reward(offline_batch) if FLAGS.use_rnd_offline: offline_replace["rewards"] = offline_replace[ "rewards" ] + rnd.get_reward(frozen_dict.freeze(offline_batch)) offline_batch = offline_batch.copy(add_or_replace=offline_replace) batch = combine(offline_batch, online_batch) else: batch = online_batch # update the main agent agent, update_info = agent.update(batch, FLAGS.utd_ratio) logging_info.update(add_prefix("agent/", update_info)) if i % FLAGS.log_interval == 0: wandb.log({"env_step": i}, step=record_step) for k, v in logging_info.items(): wandb.log({k: v}, step=record_step) # visualize rewards rm and rnd rewards along a successful offline trajectory traj = ds.load_successful_traj() rnd_reward = [] rm_reward = [] for tran in traj: if rnd is not None: rnd_reward.append(rnd.get_reward(frozen_dict.freeze(tran)).item()) if rm is not None: rm_reward.append(rm.get_reward(frozen_dict.freeze(tran)).item()) if rm is not None: plt.clf() plt.plot(rm_reward, label="rm") plt.xlabel("step in offline trajectory") plt.ylabel("reward") plt.legend() plt.title("predicted rewards in successful offline trajectory") wandb.log( {"training/offline_success_traj_rewards_rm": plt}, step=record_step ) if rnd is not None: plt.clf() plt.plot(rnd_reward, label="rnd") plt.xlabel("step in offline trajectory") plt.ylabel("reward") plt.legend() plt.title("predicted rewards in successful offline trajectory") wandb.log( {"training/offline_success_traj_rewards_rnd": plt}, step=record_step ) if i % FLAGS.eval_interval == 0:
""" Modified from https://github.com/ikostrikov/rlpd/blob/main/rlpd/train_finetuning_pixels.py Original lincense information: MIT License Copyright (c) 2022 Ilya Kostrikov, Philip J. Ball, Laura Smith Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ #! /usr/bin/env python ### cog imports ### ### cog imports ### FLAGS = flags.FLAGS flags.DEFINE_string("project_name", "explore-cog", "wandb project name.") flags.DEFINE_string("env_name", "cheetah-run-v0", "Environment name.") flags.DEFINE_float( "dataset_subsample_ratio", 0.1, "Ratio of the dataset to subsample (done twice)" ) flags.DEFINE_bool("use_icvf", False, "Whether to use the icvf encoder") flags.DEFINE_float("offline_ratio", 0.5, "Offline ratio.") flags.DEFINE_integer("seed", 42, "Random seed.") flags.DEFINE_integer("eval_episodes", 100, "Number of episodes used for evaluation.") flags.DEFINE_integer("log_interval", 1000, "Logging interval.") flags.DEFINE_integer("eval_interval", 5000, "Eval interval.") flags.DEFINE_integer("batch_size", 256, "Mini batch size.") flags.DEFINE_integer("max_steps", 500000, "Number of training steps.") flags.DEFINE_integer( "start_training", 5000, "Number of training steps to start training." ) flags.DEFINE_boolean("tqdm", True, "Use tqdm progress bar.") flags.DEFINE_string("save_dir", "exp_data_cog", "Directory to save checkpoints.") flags.DEFINE_bool("checkpoint_model", False, "save model") flags.DEFINE_bool("checkpoint_buffer", False, "save replay buffer") flags.DEFINE_integer("utd_ratio", 1, "Update to data ratio.") flags.DEFINE_float("bc_pretrain_rollin", 0.0, "rollin coeff") flags.DEFINE_integer( "bc_pretrain_steps", 10000, "Pre-train BC policy for a number of steps on pure offline data", ) config_flags.DEFINE_config_file( "config", "configs/rlpd_pixels_config.py", "File path to the training hyperparameter configuration.", lock_config=False, ) config_flags.DEFINE_config_file( "rm_config", "configs/pixel_rm_config.py", "File path to the training hyperparameter configuration.", lock_config=False, ) config_flags.DEFINE_config_file( "rnd_config", "configs/pixel_rnd_config.py", "File path to the training hyperparameter configuration.", lock_config=False, ) config_flags.DEFINE_config_file( "bc_config", "configs/pixel_bc_config.py", "File path to the training hyperparameter configuration", lock_config=False, ) flags.DEFINE_string( "offline_relabel_type", "gt", "Whether to use reward from the offline dataset. 
[gt/pred/min]", ) flags.DEFINE_boolean("use_rnd_offline", False, "Whether to use rnd offline.") flags.DEFINE_boolean("use_rnd_online", False, "Whether to use rnd online.") def combine(one_dict, other_dict): combined = {} for k, v in one_dict.items(): if isinstance(v, FrozenDict) or isinstance(v, dict): if len(v) == 0: combined[k] = v else: combined[k] = combine(v, other_dict[k]) else: tmp = np.empty( (v.shape[0] + other_dict[k].shape[0], *v.shape[1:]), dtype=v.dtype ) tmp[0::2] = v tmp[1::2] = other_dict[k] combined[k] = tmp return FrozenDict(combined) def add_prefix(prefix, dict): return {prefix + k: v for k, v in dict.items()} def main(_): wandb.init(project=FLAGS.project_name, mode="online") wandb.config.update(FLAGS) if FLAGS.save_dir is not None: log_dir = os.path.join( FLAGS.save_dir, f"{FLAGS.env_name}-s{FLAGS.seed}-icvf_{FLAGS.use_icvf}-ours_{FLAGS.use_rnd_offline}", ) print("logging to", log_dir) if FLAGS.checkpoint_model: chkpt_dir = os.path.join(log_dir, "checkpoints") os.makedirs(chkpt_dir, exist_ok=True) if FLAGS.checkpoint_buffer: buffer_dir = os.path.join(log_dir, "buffers") os.makedirs(buffer_dir, exist_ok=True) def wrap(env): return wrap_pixels( env, action_repeat=1, num_stack=1, camera_id=0, ) def render(env, *args, **kwargs): return env.render_obs() if FLAGS.env_name == "Widow250PickTray-v0": env_name_alt = "pickplace" cog_max_path_length = 40 elif FLAGS.env_name == "Widow250DoubleDrawerOpenGraspNeutral-v0": env_name_alt = "closeddrawer_small" cog_max_path_length = 50 elif FLAGS.env_name == "Widow250DoubleDrawerCloseOpenGraspNeutral-v0": env_name_alt = "blockeddrawer1_small" cog_max_path_length = 80 env = roboverse.make(FLAGS.env_name, transpose_image=False) env.render = types.MethodType(render, env) env = FilterObservation(env, ["image"]) env = TimeLimit(env, max_episode_steps=cog_max_path_length) # TODO env, pixel_keys = wrap(env) env = RecordEpisodeStatistics(env, deque_size=1) env.seed(FLAGS.seed) eval_env = roboverse.make(FLAGS.env_name, transpose_image=False) eval_env.render = types.MethodType(render, eval_env) eval_env = FilterObservation(eval_env, ["image"]) eval_env = TimeLimit(eval_env, max_episode_steps=cog_max_path_length) # TODO eval_env, _ = wrap(eval_env) eval_env.seed(FLAGS.seed + 42) dataset_path = os.path.join("data", env_name_alt) print("Data Path:", dataset_path) np_rng = np.random.default_rng(FLAGS.seed) ds = COGDataset( env=env, dataset_path=dataset_path, capacity=300000, subsample_ratio=FLAGS.dataset_subsample_ratio, np_rng=np_rng, ) ds.seed(FLAGS.seed) ds_minr = ds.dataset_dict["rewards"][: len(ds)].min() assert -10 < ds_minr < 10, "maybe sampling reward outside of buffer range" ds_iterator = ds.get_iterator( sample_args={ "batch_size": int(FLAGS.batch_size * FLAGS.utd_ratio * FLAGS.offline_ratio), "pack_obs_and_next_obs": True, } ) replay_buffer = MemoryEfficientReplayBuffer( env.observation_space, env.action_space, FLAGS.max_steps ) replay_buffer_iterator = replay_buffer.get_iterator( sample_args={ "batch_size": int( FLAGS.batch_size * FLAGS.utd_ratio * (1 - FLAGS.offline_ratio) ), "pack_obs_and_next_obs": True, } ) replay_buffer.seed(FLAGS.seed) ########### MODELS ########### # Crashes on some setups if agent is created before replay buffer. 
kwargs = dict(FLAGS.config) model_cls = kwargs.pop("model_cls") agent = globals()[model_cls].create( FLAGS.seed, env.observation_space, env.action_space, pixel_keys=pixel_keys, **kwargs, ) if FLAGS.offline_relabel_type != "gt": kwargs = dict(FLAGS.rm_config) model_cls = kwargs.pop("model_cls") rm = globals()[model_cls].create( FLAGS.seed + 123, env.observation_space, env.action_space, pixel_keys=pixel_keys, **kwargs, ) else: rm = None if FLAGS.use_rnd_offline or FLAGS.use_rnd_online: kwargs = dict(FLAGS.rnd_config) model_cls = kwargs.pop("model_cls") rnd = globals()[model_cls].create( FLAGS.seed + 123, env.observation_space, env.action_space, pixel_keys=pixel_keys, **kwargs, ) else: rnd = None # Pre-training record_step = 0 # ICVF training and initialize RM and RND with ICVF encoder if FLAGS.use_icvf: # assert rm is not None or rnd is not None, "ICVF is not needed in this configuration" icvf = PixelICVF.create( FLAGS.seed, env.observation_space, env.action_space, pixel_keys=pixel_keys, **dict(FLAGS.config), ) gc_ds = gc_dataset.GCSDataset(ds, **gc_dataset.GCSDataset.get_default_config()) for i in tqdm.trange(75001): record_step += 1 batch = gc_ds.sample(FLAGS.batch_size) icvf, update_info = icvf.update(frozen_dict.freeze(batch), 1) if i % FLAGS.log_interval == 0: for k, v in update_info.items(): wandb.log({f"icvf-training/{k}": v}, step=record_step) replace_keys = ["encoder_0"] replace = {k: icvf.net.params[k] for k in replace_keys} if rnd is not None: new_params = FrozenDict(rnd.net.params).copy(add_or_replace=replace) new_frozen_params = FrozenDict(rnd.frozen_net.params).copy( add_or_replace=replace ) rnd = rnd.replace( net=rnd.net.replace(params=new_params), frozen_net=rnd.frozen_net.replace(params=new_frozen_params), ) if rm is not None: new_params = FrozenDict(rm.r_net.params).copy(add_or_replace=replace) rm = rm.replace(r_net=rm.r_net.replace(params=new_params)) if FLAGS.bc_pretrain_rollin > 0.0: kwargs = dict(FLAGS.bc_config) model_cls = kwargs.pop("model_cls") bc_policy = globals()[model_cls].create( FLAGS.seed + 152, env.observation_space, env.action_space, **kwargs ) if FLAGS.use_icvf: new_params = FrozenDict(bc_policy.actor.params).copy(add_or_replace=replace) bc_policy = bc_policy.replace( actor=bc_policy.actor.replace(params=new_params) ) else: bc_policy = None if bc_policy is not None: for i in tqdm.tqdm( range(FLAGS.bc_pretrain_steps), smoothing=0.1, disable=not FLAGS.tqdm ): record_step += 1 batch = ds.sample(int(FLAGS.batch_size * FLAGS.utd_ratio)) bc_policy, update_info = bc_policy.update(batch, FLAGS.utd_ratio) if i % FLAGS.log_interval == 0: for k, v in update_info.items(): wandb.log(add_prefix("bc/", {k: v}), step=record_step) # Training observation, done = env.reset(), False rng = jax.random.PRNGKey(seed=FLAGS.seed) if FLAGS.bc_pretrain_rollin > 0.0: curr_rng, rng = jax.random.split(rng) rollin_enabled = ( True if jax.random.uniform(key=curr_rng) < FLAGS.bc_pretrain_rollin else False ) else: rollin_enabled = False for i in tqdm.tqdm( range(1, FLAGS.max_steps + 1), smoothing=0.1, disable=not FLAGS.tqdm, ): record_step += 1 logging_info = {} if rollin_enabled: action, bc_policy = bc_policy.sample_actions(observation) curr_rng, rng = jax.random.split(rng) rollin_enabled = ( True if jax.random.uniform(key=curr_rng) < agent.discount else False ) else: if i < FLAGS.start_training: action = env.action_space.sample() else: action, agent = agent.sample_actions(observation) next_observation, reward, done, info = env.step(action) if not done or "TimeLimit.truncated" in info: 
mask = 1.0 else: mask = 0.0 replay_buffer.insert( dict( observations=observation, actions=action, rewards=reward, masks=mask, dones=done, next_observations=next_observation, ) ) observation = next_observation if done: observation, done = env.reset(), False for k, v in info["episode"].items(): decode = {"r": "return", "l": "length", "t": "time"} wandb.log({f"episode/{decode[k]}": v}, step=record_step) if FLAGS.bc_pretrain_rollin > 0.0: curr_rng, rng = jax.random.split(rng) rollin_enabled = ( True if jax.random.uniform(key=curr_rng) < FLAGS.bc_pretrain_rollin else False ) # main updates if i >= FLAGS.start_training: online_batch = next(replay_buffer_iterator) if i >= FLAGS.start_training * 2: # update the reward model on the online batch if rm is not None: rm, rm_update_info = rm.update(online_batch, FLAGS.utd_ratio) logging_info.update(add_prefix("rm/", rm_update_info)) if rnd is not None: rnd, rnd_update_info = rnd.update( frozen_dict.freeze( { "observations": { k: ob[None] for k, ob in observation.items() }, "actions": action[None], "next_observations": { k: ob[None] for k, ob in next_observation.items() }, "rewards": np.array(reward)[None], "masks": np.array(mask)[None], "dones": np.array(done)[None], } ) ) logging_info.update(add_prefix("rnd/", rnd_update_info)) # prepare the batch for the main agent online_replace = {"bc_masks": jnp.ones_like(online_batch["masks"])} if FLAGS.use_rnd_online: online_replace["rewards"] = online_batch["rewards"] + rnd.get_reward( frozen_dict.freeze(online_batch) ) online_batch = online_batch.copy(add_or_replace=online_replace) if FLAGS.offline_ratio > 0: offline_batch = next(ds_iterator) offline_replace = { "bc_masks": jnp.ones_like(offline_batch["masks"]), "rewards": offline_batch["rewards"], } if FLAGS.offline_relabel_type in ["pred", "min"]: offline_replace["masks"] = rm.get_mask(offline_batch) if FLAGS.offline_relabel_type == "min": offline_replace["rewards"] = ( offline_batch["rewards"].at[:].set(ds_minr) ) if FLAGS.offline_relabel_type == "pred": offline_replace["rewards"] = rm.get_reward(offline_batch) if FLAGS.use_rnd_offline: offline_replace["rewards"] = offline_replace[ "rewards" ] + rnd.get_reward(frozen_dict.freeze(offline_batch)) offline_batch = offline_batch.copy(add_or_replace=offline_replace) batch = combine(offline_batch, online_batch) else: batch = online_batch # update the main agent agent, update_info = agent.update(batch, FLAGS.utd_ratio) logging_info.update(add_prefix("agent/", update_info)) if i % FLAGS.log_interval == 0: wandb.log({"env_step": i}, step=record_step) for k, v in logging_info.items(): wandb.log({k: v}, step=record_step) # visualize rewards rm and rnd rewards along a successful offline trajectory traj = ds.load_successful_traj() rnd_reward = [] rm_reward = [] for tran in traj: if rnd is not None: rnd_reward.append(rnd.get_reward(frozen_dict.freeze(tran)).item()) if rm is not None: rm_reward.append(rm.get_reward(frozen_dict.freeze(tran)).item()) if rm is not None: plt.clf() plt.plot(rm_reward, label="rm") plt.xlabel("step in offline trajectory") plt.ylabel("reward") plt.legend() plt.title("predicted rewards in successful offline trajectory") wandb.log( {"training/offline_success_traj_rewards_rm": plt}, step=record_step ) if rnd is not None: plt.clf() plt.plot(rnd_reward, label="rnd") plt.xlabel("step in offline trajectory") plt.ylabel("reward") plt.legend() plt.title("predicted rewards in successful offline trajectory") wandb.log( {"training/offline_success_traj_rewards_rnd": plt}, step=record_step ) if i % 
FLAGS.eval_interval == 0:
eval_info, _ = evaluate(
6
2023-11-19 21:28:52+00:00
16k
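Editor's note on the record above: its combine() helper interleaves an offline batch and an online batch along the batch axis with strided assignment, so any downstream slice sees a balanced mix of both sources. A standalone sketch of that trick with plain numpy arrays:

import numpy as np

offline = np.arange(4).reshape(4, 1)          # stand-in offline batch
online = (np.arange(4) + 100).reshape(4, 1)   # stand-in online batch
mixed = np.empty((8, 1), dtype=offline.dtype)
mixed[0::2] = offline                          # even rows: offline samples
mixed[1::2] = online                           # odd rows: online samples
print(mixed.ravel())                           # [  0 100   1 101   2 102   3 103]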
Luo-Z13/pointobb
PointOBB/mmdet/models/detectors/PointOBB.py
[ { "identifier": "DETECTORS", "path": "PointOBB/mmdet/models/builder.py", "snippet": "DETECTORS = MODELS" }, { "identifier": "TwoStageDetector", "path": "PointOBB/mmdet/models/detectors/two_stage.py", "snippet": "class TwoStageDetector(BaseDetector):\n \"\"\"Base class for two-stage de...
import copy import torch import numpy as np import copy import math import cv2 import os from ..builder import DETECTORS from .two_stage import TwoStageDetector from mmdet.core.bbox import bbox_xyxy_to_cxcywh from mmdet.core import bbox_cxcywh_to_xyxy from mmdet.core.bbox.iou_calculators import bbox_overlaps from ..builder import build_head from torch.nn import functional as F from ..builder import HEADS, build_loss from typing import Tuple, Union from torch import Tensor from torch.nn.functional import grid_sample from torchvision import transforms from .P2BNet import gen_proposals_from_cfg from .utils import resize_proposal, resize_single_proposal, flip_tensor, hboxlist2cxcywha \ ,merge_batch_list, split_batch_list, box_iou_rotated, obb2poly_np
11,271
cls_score_v1_prob = cls_score_v1_prob.reshape(cls_score_v1.size(0), num_base_scales, -1) ins_score_v1_prob = ins_score_v1_prob.reshape(ins_score_v1.size(0), num_base_scales, -1) cls_score_v2_prob = cls_score_v2_prob.reshape(cls_score_v2.size(0), num_base_scales, -1) ins_score_v2_prob = ins_score_v2_prob.reshape(ins_score_v2.size(0), num_base_scales, -1) cls_similarity = 1 - F.cosine_similarity(cls_score_v1_prob, cls_score_v2_prob, dim=-1, eps=1e-6) ins_similarity = 1 - F.cosine_similarity(ins_score_v1_prob, ins_score_v2_prob, dim=-1, eps=1e-6) score_similarity = 1 - F.cosine_similarity(prob_v1, prob_v2, dim=1, eps=1e-6) return cls_similarity, ins_similarity, score_similarity # def Cross_View_Sim(self, results_v1v2, gt_labels, proposals_valid_list, mode = 'scales', stage = 0): # gt_label = torch.cat(gt_labels) # half_num = len(gt_label)//2 # proposals_valid_all = torch.cat(proposals_valid_list) # half_num_vaild = len(proposals_valid_all)//2 # # with torch.no_grad(): # base_proposal_cfg = self.train_cfg.get('base_proposal',self.test_cfg.rpn) # fine_proposal_cfg = self.train_cfg.get('fine_proposal',self.test_cfg.rpn) # if mode == 'scales': # num_base_scales = len(base_proposal_cfg['base_scales']) # elif mode == 'ratios': # num_base_scales = len(base_proposal_cfg['base_ratios']) # elif mode == 'gts': # num_base_scales = len(base_proposal_cfg['base_scales']) * len(base_proposal_cfg['base_ratios']) # if stage >=1: # if isinstance(fine_proposal_cfg['base_ratios'], tuple): # num_base_scales = len(fine_proposal_cfg['base_ratios'][stage - 1]) # # shake_ratio = fine_proposal_cfg['shake_ratio'][stage - 1] # else: # num_base_scales = len(fine_proposal_cfg['base_ratios']) # # shake_ratio = fine_proposal_cfg['shake_ratio'] # cls_score_v1 = results_v1v2['cls_score'][:half_num,...] # [num_gt, num_pros, num_cls+1]) # ins_score_v1 = results_v1v2['ins_score'][:half_num,...] # proposal_vaild_v1 = proposals_valid_all[:half_num_vaild,...].reshape(half_num, -1) # proposal_vaild_v2 = proposals_valid_all[half_num_vaild:,...].reshape(half_num, -1) # proposal_vaild = proposal_vaild_v1 * proposal_vaild_v2 # if stage < 1: # cls_score_v1_prob = cls_score_v1.softmax(dim=-1) # elif stage >= 1: # cls_score_v1_prob = cls_score_v1.sigmoid() # cls_score_v1_prob = cls_score_v1_prob * proposal_vaild[...,None] # ins_score_v1_prob = ins_score_v1.softmax(dim=1) * proposal_vaild[...,None] # ins_score_v1_prob = F.normalize(ins_score_v1_prob, dim=1, p=1) # prob_v1 = (cls_score_v1_prob * ins_score_v1_prob).sum(dim=1) # cls_score_v2 = results_v1v2['cls_score'][half_num:,...] # ins_score_v2 = results_v1v2['ins_score'][half_num:,...] 
# if stage < 1: # cls_score_v2_prob = cls_score_v2.softmax(dim=-1) # elif stage >= 1: # cls_score_v2_prob = cls_score_v2.sigmoid() # cls_score_v2_prob = cls_score_v2_prob * proposal_vaild[...,None] # ins_score_v2_prob = ins_score_v2.softmax(dim=1) * proposal_vaild[...,None] # ins_score_v2_prob = F.normalize(ins_score_v2_prob, dim=1, p=1) # prob_v2 = (cls_score_v2_prob * ins_score_v2_prob).sum(dim=1) # if stage >= 1: # cls_score_v1_prob_list = [] # cls_score_v2_prob_list = [] # ins_score_v1_prob_list = [] # ins_score_v2_prob_list = [] # for i in range(half_num): # cls_score_v1_prob_list.append(cls_score_v1_prob[i, ..., gt_label[i]].unsqueeze(0)) # cls_score_v2_prob_list.append(cls_score_v2_prob[i, ..., gt_label[i]].unsqueeze(0)) # ins_score_v1_prob_list.append(ins_score_v1_prob[i, ..., gt_label[i]].unsqueeze(0)) # ins_score_v2_prob_list.append(ins_score_v2_prob[i, ..., gt_label[i]].unsqueeze(0)) # cls_score_v1_prob = torch.cat(cls_score_v1_prob_list, dim=0) # cls_score_v2_prob = torch.cat(cls_score_v2_prob_list, dim=0) # ins_score_v1_prob = torch.cat(ins_score_v1_prob_list, dim=0) # ins_score_v2_prob = torch.cat(ins_score_v2_prob_list, dim=0) # cls_score_v1_prob = cls_score_v1_prob.reshape(cls_score_v1.size(0), num_base_scales, -1) # # cls_score_v1_prob = cls_score_v1_prob * proposal_vaild_v1 # ins_score_v1_prob = ins_score_v1_prob.reshape(ins_score_v1.size(0), num_base_scales, -1) # cls_score_v2_prob = cls_score_v2_prob.reshape(cls_score_v2.size(0), num_base_scales, -1) # ins_score_v2_prob = ins_score_v2_prob.reshape(ins_score_v2.size(0), num_base_scales, -1) # cls_similarity = 1 - F.cosine_similarity(cls_score_v1_prob, cls_score_v2_prob, dim=-1, eps=1e-6) # ins_similarity = 1 - F.cosine_similarity(ins_score_v1_prob, ins_score_v2_prob, dim=-1, eps=1e-6) # score_similarity = 1 - F.cosine_similarity(prob_v1, prob_v2, dim=1, eps=1e-6) # return cls_similarity, ins_similarity, score_similarity def forward_train(self, img, img_metas, gt_bboxes, gt_true_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None, proposals=None, **kwargs): if self.iter_count == self.burn_in_steps1: self.roi_head.use_angle_loss = True print(f'#####iter_count1 use_angle_loss:{self.iter_count}#####') if self.construct_resize: self.construct_resize = False if self.iter_count == self.burn_in_steps2: if self.roi_head.use_angle_loss: self.roi_head.add_angle_pred_begin = True print(f'#####iter_count2 add_angle_pred_begin:{self.iter_count}#####') base_proposal_cfg = self.train_cfg.get('base_proposal', self.test_cfg.rpn) fine_proposal_cfg = self.train_cfg.get('fine_proposal', self.test_cfg.rpn) losses = dict() gt_points = [bbox_xyxy_to_cxcywh(b)[:, :2] for b in gt_bboxes] if self.stage == 0:
# from mmdet.datasets.utils import obb2poly_np def resize_image(inputs, resize_ratio=0.5): down_inputs = F.interpolate(inputs, scale_factor=resize_ratio, mode='nearest') return down_inputs def fine_rotate_proposals_from_cfg(pseudo_boxes, fine_proposal_cfg, img_meta, stage): gen_mode = fine_proposal_cfg['gen_proposal_mode'] # cut_mode = fine_proposal_cfg['cut_mode'] cut_mode = None if isinstance(fine_proposal_cfg['base_ratios'], tuple): base_ratios = fine_proposal_cfg['base_ratios'][stage - 1] shake_ratio = fine_proposal_cfg['shake_ratio'][stage - 1] else: base_ratios = fine_proposal_cfg['base_ratios'] shake_ratio = fine_proposal_cfg['shake_ratio'] if gen_mode == 'fix_gen': proposal_list = [] proposals_valid_list = [] for i in range(len(img_meta)): pps = [] base_boxes = pseudo_boxes[i] for ratio_w in base_ratios: for ratio_h in base_ratios: base_boxes_ = base_boxes.clone() base_boxes_[:, 2] *= ratio_w base_boxes_[:, 3] *= ratio_h pps.append(base_boxes_.unsqueeze(1)) pps_old = torch.cat(pps, dim=1) if shake_ratio is not None: pps_new = [] pps_new.append(pps_old.reshape(*pps_old.shape[0:2], -1, 5)) for ratio in shake_ratio: pps = pps_old.clone() pps_center = pps[:, :, :2] pps_wh = pps[:, :, 2:4] pps_angle = pps[:, :, 4].unsqueeze(2) pps_x_l = pps_center[:, :, 0] - ratio * pps_wh[:, :, 0] pps_x_r = pps_center[:, :, 0] + ratio * pps_wh[:, :, 0] pps_y_t = pps_center[:, :, 1] - ratio * pps_wh[:, :, 1] pps_y_d = pps_center[:, :, 1] + ratio * pps_wh[:, :, 1] pps_center_l = torch.stack([pps_x_l, pps_center[:, :, 1]], dim=-1) pps_center_r = torch.stack([pps_x_r, pps_center[:, :, 1]], dim=-1) pps_center_t = torch.stack([pps_center[:, :, 0], pps_y_t], dim=-1) pps_center_d = torch.stack([pps_center[:, :, 0], pps_y_d], dim=-1) pps_center = torch.stack([pps_center_l, pps_center_r, pps_center_t, pps_center_d], dim=2) pps_wh = pps_wh.unsqueeze(2).expand(pps_center.shape) pps_angle = pps_angle.unsqueeze(2).expand((pps_center.size()[0], pps_center.size()[1], pps_center.size()[2], 1)) pps = torch.cat([pps_center, pps_wh, pps_angle], dim=-1) pps = pps.reshape(pps.shape[0], -1, 5) pps_new.append(pps.reshape(*pps_old.shape[0:2], -1, 5)) pps_new = torch.cat(pps_new, dim=2) else: pps_new = pps_old h, w, _ = img_meta[i]['img_shape'] if cut_mode == 'clamp': pps_new[..., 0:4:2] = torch.clamp(pps_new[..., 0:4:2], 0, w) pps_new[..., 1:4:2] = torch.clamp(pps_new[..., 1:4:2], 0, h) proposals_valid_list.append(pps_new.new_full( (*pps_new.shape[0:3], 1), 1, dtype=torch.long).reshape(-1, 1)) else: rot_theta = base_boxes[:,-1].mean() img_xywh = pps_new.new_tensor([w/2, h/2, w, h, rot_theta]) # (cx,cy,w,h,theta) iof_in_img = box_iou_rotated(pps_new.reshape(-1, 5), img_xywh.unsqueeze(0), mode='iof') proposals_valid = iof_in_img > 0.8 proposals_valid_list.append(proposals_valid) proposal_list.append(pps_new.reshape(-1, 5)) return proposal_list, proposals_valid_list def gen_rotate_negative_proposals(gt_points, proposal_cfg, aug_generate_proposals, img_meta): num_neg_gen = proposal_cfg['gen_num_neg'] if num_neg_gen == 0: return None, None neg_proposal_list = [] neg_weight_list = [] device = gt_points[0].device for i in range(len(gt_points)): pos_box = aug_generate_proposals[i] h, w, _ = img_meta[i]['img_shape'] x1 = -0.2 * w + torch.rand(num_neg_gen) * (1.2 * w) y1 = -0.2 * h + torch.rand(num_neg_gen) * (1.2 * h) x2 = x1 + torch.rand(num_neg_gen) * (1.2 * w - x1) y2 = y1 + torch.rand(num_neg_gen) * (1.2 * h - y1) neg_theta = torch.ones_like(x1)*(pos_box[:,-1].mean().cpu()) neg_bboxes = torch.stack([(x1 + x2) / 2, (y1 + y2) / 2, x2
- x1, y2 - y1, neg_theta], dim=1).to(device) iou = box_iou_rotated(neg_bboxes, pos_box) neg_weight = ((iou < 0.3).sum(dim=1) == iou.shape[1]) neg_proposal_list.append(neg_bboxes) neg_weight_list.append(neg_weight) return neg_proposal_list, neg_weight_list def resize_rotate_proposal(img_metas, batch_gt_bboxes, batch_proposals, gt_true_bboxes, gt_bboxes_ignore, ratio = 0.5): ''' batch_gt_bboxes_all: [batch_size, num_proposals, 5] [cx,cy,w,h,a] batch_proposals_all: [batch_size, num_proposals, 5] [cx,cy,w,h,a] ''' img_meta_out = copy.deepcopy(img_metas) batch_gt_bboxes_out = [] batch_proposals_out = [] gt_true_bboxes_out = [] gt_bboxes_ignore_out = [] for i in range(len(img_metas)): h, w, c = img_metas[i]['img_shape'] img_meta_out[i]['img_shape'] = (math.ceil(h * ratio), math.ceil(w * ratio), c) img_meta_out[i]['pad_shape'] = (math.ceil(h * ratio), math.ceil(w * ratio), c) tmp_gt_bboxes = batch_gt_bboxes[i].clone() tmp_gt_bboxes[:,:4] = tmp_gt_bboxes[:,:4] * ratio batch_gt_bboxes_out.append(tmp_gt_bboxes) tmp_proposal = batch_proposals[i].clone() tmp_proposal[:,:4] = tmp_proposal[:,:4] * ratio batch_proposals_out.append(tmp_proposal) tmp_gt_true_bbox = gt_true_bboxes[i].clone() tmp_gt_true_bbox[:,:4] = tmp_gt_true_bbox[:,:4] * ratio gt_true_bboxes_out.append(tmp_gt_true_bbox) tmp_gt_bboxes_ignore = gt_bboxes_ignore[i].clone() if gt_bboxes_ignore[i].size(0) != 0: tmp_gt_bboxes_ignore[:,:4] = tmp_gt_bboxes_ignore[:,:4] * ratio gt_bboxes_ignore_out.append(tmp_gt_bboxes_ignore) return img_meta_out, batch_gt_bboxes_out, batch_proposals_out, gt_true_bboxes_out, gt_bboxes_ignore_out @DETECTORS.register_module() class PointOBB(TwoStageDetector): def __init__(self, backbone, roi_head, train_cfg, test_cfg, construct_view = True, construct_resize = False, loss_diff_view=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0), crop_size = (1024, 1024), padding = 'reflection', view_range: Tuple[float, float] = (0.25, 0.75), bbox_head=None, neck=None, pretrained=None, init_cfg=None): super(PointOBB, self).__init__( backbone=backbone, neck=neck, roi_head=roi_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained, init_cfg=init_cfg) self.num_stages = roi_head.num_stages self.stage = 0 print(f'========={self.stage}===========') if bbox_head is not None: self.with_bbox_head = True self.bbox_head = build_head(bbox_head) self.crop_size = crop_size self.padding = padding self.view_range = view_range self.loss_diff_view = build_loss(loss_diff_view) self.construct_view = construct_view self.construct_resize = construct_resize if train_cfg is not None: self.iter_count = train_cfg.get("iter_count") self.burn_in_steps1 = train_cfg.get("burn_in_steps1") self.burn_in_steps2 = train_cfg.get("burn_in_steps2") def rotate_crop( self, batch_inputs: Tensor, rot: float = 0., size: Tuple[int, int] = (768, 768), batch_gt_instances = None, padding: str = 'reflection'): """ Args: batch_inputs (Tensor): Input images of shape (N, C, H, W). These should usually be mean centered and std scaled. rot (float): Angle of view rotation. Defaults to 0. size (tuple[int]): Crop size from image center. Defaults to (768, 768). batch_gt_instances (list[:obj:`InstanceData`]): Batch of gt_instance. padding (str): Padding method of image black edge. Defaults to 'reflection'.
Returns: Processed batch_inputs (Tensor) and batch_gt_instances (list[:obj:`InstanceData`]) """ device = batch_inputs.device n, c, h, w = batch_inputs.shape size_h, size_w = size crop_h = (h - size_h) // 2 crop_w = (w - size_w) // 2 if rot != 0: cosa, sina = math.cos(rot), math.sin(rot) tf = batch_inputs.new_tensor([[cosa, -sina], [sina, cosa]], dtype=torch.float) x_range = torch.linspace(-1, 1, w, device=device) y_range = torch.linspace(-1, 1, h, device=device) y, x = torch.meshgrid(y_range, x_range) grid = torch.stack([x, y], -1).expand([n, -1, -1, -1]) grid = grid.reshape(-1, 2).matmul(tf).view(n, h, w, 2) # rotate batch_inputs = grid_sample( batch_inputs, grid, 'bilinear', padding, align_corners=True) if batch_gt_instances is not None: for i, gt_instances in enumerate(batch_gt_instances): gt_bboxes = gt_instances xy, wh, a = gt_bboxes[..., :2], gt_bboxes[ ..., 2:4], gt_bboxes[..., [4]] ctr = tf.new_tensor([[w / 2, h / 2]]) xy = (xy - ctr).matmul(tf.T) + ctr a = a + rot rot_gt_bboxes = torch.cat([xy, wh, a], dim=-1) batch_gt_instances[i] = rot_gt_bboxes batch_inputs = batch_inputs[..., crop_h:crop_h + size_h, crop_w:crop_w + size_w] if batch_gt_instances is None: return batch_inputs else: # rot == 0 for i, gt_instances in enumerate(batch_gt_instances): gt_bboxes = gt_instances xy, wh, a = gt_bboxes[..., :2], gt_bboxes[..., 2:4], gt_bboxes[..., [4]] xy = xy - xy.new_tensor([[crop_w, crop_h]]) crop_gt_bboxes = torch.cat([xy, wh, a], dim=-1) batch_gt_instances[i] = crop_gt_bboxes return batch_inputs, batch_gt_instances def construct_Rview(self, img, generate_proposals_0, gt_bboxes, img_metas, gt_labels, gt_true_bboxes, gt_bboxes_ignore, proposals_valid_list_0): img_ori = img.clone() # 1) # Crop original images and gts batch_gt_bboxes = hboxlist2cxcywha(gt_bboxes) batch_proposals = hboxlist2cxcywha(generate_proposals_0) batch_instances_all, interval_flag = merge_batch_list(batch_gt_bboxes, batch_proposals) img, batch_instances_all = self.rotate_crop(img, 0, self.crop_size, batch_instances_all, self.padding) offset_gt = 1 offset = 1 for i, img_meta in enumerate(img_metas): img_meta['gt_bid'] = torch.arange(0, interval_flag[i][0], 1, device=batch_instances_all[i].device) + offset_gt + 0.2 offset_gt += interval_flag[i][0] img_meta['bid'] = torch.arange(0, interval_flag[i][1], 1, device=batch_instances_all[i].device) + offset + 0.2 offset += interval_flag[i][1] # 2) # Generate rotated images and gts rot = math.pi * ( torch.rand(1, device=img.device) * (self.view_range[1] - self.view_range[0]) + self.view_range[0]) batch_instance_rot = copy.deepcopy(batch_instances_all) img_metas_rot = copy.deepcopy(img_metas) img_rot, batch_instance_rot = self.rotate_crop( img, rot, self.crop_size, batch_instance_rot, self.padding) offset_gt = 1 offset = 1 for i, img_meta in enumerate(img_metas_rot): img_meta['gt_bid'] = torch.arange(0, interval_flag[i][0], 1, device=batch_instance_rot[i].device) + offset_gt + 0.4 offset_gt += interval_flag[i][0] img_meta['bid'] = torch.arange(0, interval_flag[i][1], 1, device=batch_instance_rot[i].device) + offset + 0.4 offset += interval_flag[i][1] # 3) # Generate flipped images and gts img_flp = transforms.functional.vflip(img) batch_instances_flp = copy.deepcopy(batch_instances_all) img_metas_flp = copy.deepcopy(img_metas) offset_gt = 1 offset = 1 for i, img_meta in enumerate(img_metas_flp): batch_instances_flp[i] = flip_tensor(batch_instances_flp[i], img.shape[2:4], 'vertical' ) img_meta['gt_bid'] = torch.arange(0, interval_flag[i][0], 1, 
device=batch_instances_flp[i].device) + offset_gt + 0.6 offset_gt += interval_flag[i][0] img_meta['bid'] = torch.arange(0, interval_flag[i][1], 1, device=batch_instances_flp[i].device) + offset + 0.6 offset += interval_flag[i][1] # 4) # Concat original/rotated/flipped images and gts batch_gt_bboxes, batch_proposals = split_batch_list(batch_instances_all, interval_flag) batch_gt_bboxes_rot, batch_proposals_rot = split_batch_list(batch_instance_rot, interval_flag) batch_gt_bboxes_flp, batch_proposals_flp = split_batch_list(batch_instances_flp, interval_flag) proposals_valid_list_rot = [] for v in range(len(proposals_valid_list_0)): rot_theta = batch_proposals_rot[v][:,-1].mean() h, w, _ = img_metas[v]['img_shape'] # img_shape is (h, w, c), same convention as in resize_rotate_proposal img_xywha = batch_proposals_rot[v].new_tensor([w/2, h/2, w, h, rot_theta]) # (cx,cy,w,h,theta) iof_in_img = box_iou_rotated(batch_proposals_rot[v], img_xywha.unsqueeze(0), mode='iof') # iof_in_img = bbox_overlaps(pps_new.reshape(-1, 4), img_xyxy.unsqueeze(0), mode='iof') proposals_valid = iof_in_img > 0.8 proposals_valid_list_rot.append(proposals_valid) img_ori, batch_instances_gt_true = self.rotate_crop(img_ori, 0, self.crop_size,gt_true_bboxes, self.padding) batch_instances_gt_true_rot = copy.deepcopy(batch_instances_gt_true) _, batch_instances_gt_true_rot = self.rotate_crop(img_ori, rot, self.crop_size, batch_instances_gt_true_rot, self.padding) batch_instances_gt_true_flp = copy.deepcopy(batch_instances_gt_true) for i, img_meta in enumerate(img_metas_flp): batch_instances_gt_true_flp[i] = flip_tensor(batch_instances_gt_true_flp[i], img_ori.shape[2:4], 'vertical' ) batch_gt_bboxes_all = [] batch_proposals_all = [] img_metas_all = [] gt_true_bboxes_all = [] proposals_valid_list_all = [] gt_labels_all = gt_labels + gt_labels gt_bboxes_ignore_all = gt_bboxes_ignore + gt_bboxes_ignore if torch.rand(1) < 0.95: img_inputs_all = torch.cat( (img, img_rot)) for gt_box in batch_gt_bboxes + batch_gt_bboxes_rot: batch_gt_bboxes_all.append(gt_box) for proposal in batch_proposals + batch_proposals_rot: batch_proposals_all.append(proposal) for tmp_img_metas in img_metas + img_metas_rot: img_metas_all.append(tmp_img_metas) for gt_true in batch_instances_gt_true + batch_instances_gt_true_rot: gt_true_bboxes_all.append(gt_true) for proposal_valid in proposals_valid_list_0 + proposals_valid_list_rot: proposals_valid_list_all.append(proposal_valid) else: img_inputs_all = torch.cat( (img, img_flp)) for gt_box in batch_gt_bboxes + batch_gt_bboxes_flp: batch_gt_bboxes_all.append(gt_box) for proposal in batch_proposals + batch_proposals_flp: batch_proposals_all.append(proposal) for tmp_img_metas in img_metas + img_metas_flp: img_metas_all.append(tmp_img_metas) for gt_true in batch_instances_gt_true + batch_instances_gt_true_flp: gt_true_bboxes_all.append(gt_true) for proposal_valid in proposals_valid_list_0 + proposals_valid_list_0: proposals_valid_list_all.append(proposal_valid) return (img_inputs_all, batch_gt_bboxes_all, batch_proposals_all, img_metas_all, gt_labels_all, gt_true_bboxes_all, gt_bboxes_ignore_all, proposals_valid_list_all) def Cross_View_Diff_Sim(self, results_v1, results_v2, gt_labels, proposals_valid, double_view, mode = 'scales', stage = 0): gt_label = torch.cat(gt_labels) base_proposal_cfg = self.train_cfg.get('base_proposal',self.test_cfg.rpn) fine_proposal_cfg = self.train_cfg.get('fine_proposal',self.test_cfg.rpn) if mode == 'scales': num_base_scales = len(base_proposal_cfg['base_scales']) elif mode == 'ratios': num_base_scales = len(base_proposal_cfg['base_ratios']) elif mode == 
'gts': num_base_scales = len(base_proposal_cfg['base_scales']) * len(base_proposal_cfg['base_ratios']) if stage >=1: if isinstance(fine_proposal_cfg['base_ratios'], tuple): num_base_scales = len(fine_proposal_cfg['base_ratios'][stage - 1]) else: num_base_scales = len(fine_proposal_cfg['base_ratios']) if not double_view: v1_half_num = len(results_v1['cls_score']) else: v1_half_num = len(results_v1['cls_score'])//2 cls_score_v1 = results_v1['cls_score'][:v1_half_num,...] ins_score_v1 = results_v1['ins_score'][:v1_half_num,...] # only the union of the two views' valid masks counts as the shared valid region proposal_vaild = torch.cat(proposals_valid).reshape(cls_score_v1.size(0),-1,1) if stage < 1: cls_score_v1_prob = cls_score_v1.softmax(dim=-1) elif stage >= 1: cls_score_v1_prob = cls_score_v1.sigmoid() cls_score_v1_prob = cls_score_v1_prob * proposal_vaild ins_score_v1_prob = ins_score_v1.softmax(dim=1) * proposal_vaild # cls_score_v1_prob = cls_score_v1_prob # ins_score_v1_prob = ins_score_v1.softmax(dim=1) ins_score_v1_prob = F.normalize(ins_score_v1_prob, dim=1, p=1) prob_v1 = (cls_score_v1_prob * ins_score_v1_prob).sum(dim=1) cls_score_v2 = results_v2['cls_score'] ins_score_v2 = results_v2['ins_score'] if stage < 1: cls_score_v2_prob = cls_score_v2.softmax(dim=-1) elif stage >= 1: cls_score_v2_prob = cls_score_v2.sigmoid() cls_score_v2_prob = cls_score_v2_prob * proposal_vaild ins_score_v2_prob = ins_score_v2.softmax(dim=1) * proposal_vaild ins_score_v2_prob = F.normalize(ins_score_v2_prob, dim=1, p=1) prob_v2 = (cls_score_v2_prob * ins_score_v2_prob).sum(dim=1) if stage>=1: cls_score_v1_prob_list = [] cls_score_v2_prob_list = [] ins_score_v1_prob_list = [] ins_score_v2_prob_list = [] for i in range(v1_half_num): cls_score_v1_prob_list.append(cls_score_v1_prob[i, ..., gt_label[i]].unsqueeze(0)) cls_score_v2_prob_list.append(cls_score_v2_prob[i, ..., gt_label[i]].unsqueeze(0)) ins_score_v1_prob_list.append(ins_score_v1_prob[i, ..., gt_label[i]].unsqueeze(0)) ins_score_v2_prob_list.append(ins_score_v2_prob[i, ..., gt_label[i]].unsqueeze(0)) cls_score_v1_prob = torch.cat(cls_score_v1_prob_list, dim=0) cls_score_v2_prob = torch.cat(cls_score_v2_prob_list, dim=0) ins_score_v1_prob = torch.cat(ins_score_v1_prob_list, dim=0) ins_score_v2_prob = torch.cat(ins_score_v2_prob_list, dim=0) cls_score_v1_prob = cls_score_v1_prob.reshape(cls_score_v1.size(0), num_base_scales, -1) ins_score_v1_prob = ins_score_v1_prob.reshape(ins_score_v1.size(0), num_base_scales, -1) cls_score_v2_prob = cls_score_v2_prob.reshape(cls_score_v2.size(0), num_base_scales, -1) ins_score_v2_prob = ins_score_v2_prob.reshape(ins_score_v2.size(0), num_base_scales, -1) cls_similarity = 1 - F.cosine_similarity(cls_score_v1_prob, cls_score_v2_prob, dim=-1, eps=1e-6) ins_similarity = 1 - F.cosine_similarity(ins_score_v1_prob, ins_score_v2_prob, dim=-1, eps=1e-6) score_similarity = 1 - F.cosine_similarity(prob_v1, prob_v2, dim=1, eps=1e-6) return cls_similarity, ins_similarity, score_similarity # def Cross_View_Sim(self, results_v1v2, gt_labels, proposals_valid_list, mode = 'scales', stage = 0): # gt_label = torch.cat(gt_labels) # half_num = len(gt_label)//2 # proposals_valid_all = torch.cat(proposals_valid_list) # half_num_vaild = len(proposals_valid_all)//2 # # with torch.no_grad(): # base_proposal_cfg = self.train_cfg.get('base_proposal',self.test_cfg.rpn) # fine_proposal_cfg = self.train_cfg.get('fine_proposal',self.test_cfg.rpn) # if mode == 'scales': # num_base_scales = len(base_proposal_cfg['base_scales']) # elif mode == 'ratios': # num_base_scales = len(base_proposal_cfg['base_ratios']) 
# elif mode == 'gts': # num_base_scales = len(base_proposal_cfg['base_scales']) * len(base_proposal_cfg['base_ratios']) # if stage >=1: # if isinstance(fine_proposal_cfg['base_ratios'], tuple): # num_base_scales = len(fine_proposal_cfg['base_ratios'][stage - 1]) # # shake_ratio = fine_proposal_cfg['shake_ratio'][stage - 1] # else: # num_base_scales = len(fine_proposal_cfg['base_ratios']) # # shake_ratio = fine_proposal_cfg['shake_ratio'] # cls_score_v1 = results_v1v2['cls_score'][:half_num,...] # [num_gt, num_pros, num_cls+1]) # ins_score_v1 = results_v1v2['ins_score'][:half_num,...] # proposal_vaild_v1 = proposals_valid_all[:half_num_vaild,...].reshape(half_num, -1) # proposal_vaild_v2 = proposals_valid_all[half_num_vaild:,...].reshape(half_num, -1) # proposal_vaild = proposal_vaild_v1 * proposal_vaild_v2 # if stage < 1: # cls_score_v1_prob = cls_score_v1.softmax(dim=-1) # elif stage >= 1: # cls_score_v1_prob = cls_score_v1.sigmoid() # cls_score_v1_prob = cls_score_v1_prob * proposal_vaild[...,None] # ins_score_v1_prob = ins_score_v1.softmax(dim=1) * proposal_vaild[...,None] # ins_score_v1_prob = F.normalize(ins_score_v1_prob, dim=1, p=1) # prob_v1 = (cls_score_v1_prob * ins_score_v1_prob).sum(dim=1) # cls_score_v2 = results_v1v2['cls_score'][half_num:,...] # ins_score_v2 = results_v1v2['ins_score'][half_num:,...] # if stage < 1: # cls_score_v2_prob = cls_score_v2.softmax(dim=-1) # elif stage >= 1: # cls_score_v2_prob = cls_score_v2.sigmoid() # cls_score_v2_prob = cls_score_v2_prob * proposal_vaild[...,None] # ins_score_v2_prob = ins_score_v2.softmax(dim=1) * proposal_vaild[...,None] # ins_score_v2_prob = F.normalize(ins_score_v2_prob, dim=1, p=1) # prob_v2 = (cls_score_v2_prob * ins_score_v2_prob).sum(dim=1) # if stage >= 1: # cls_score_v1_prob_list = [] # cls_score_v2_prob_list = [] # ins_score_v1_prob_list = [] # ins_score_v2_prob_list = [] # for i in range(half_num): # cls_score_v1_prob_list.append(cls_score_v1_prob[i, ..., gt_label[i]].unsqueeze(0)) # cls_score_v2_prob_list.append(cls_score_v2_prob[i, ..., gt_label[i]].unsqueeze(0)) # ins_score_v1_prob_list.append(ins_score_v1_prob[i, ..., gt_label[i]].unsqueeze(0)) # ins_score_v2_prob_list.append(ins_score_v2_prob[i, ..., gt_label[i]].unsqueeze(0)) # cls_score_v1_prob = torch.cat(cls_score_v1_prob_list, dim=0) # cls_score_v2_prob = torch.cat(cls_score_v2_prob_list, dim=0) # ins_score_v1_prob = torch.cat(ins_score_v1_prob_list, dim=0) # ins_score_v2_prob = torch.cat(ins_score_v2_prob_list, dim=0) # cls_score_v1_prob = cls_score_v1_prob.reshape(cls_score_v1.size(0), num_base_scales, -1) # # cls_score_v1_prob = cls_score_v1_prob * proposal_vaild_v1 # ins_score_v1_prob = ins_score_v1_prob.reshape(ins_score_v1.size(0), num_base_scales, -1) # cls_score_v2_prob = cls_score_v2_prob.reshape(cls_score_v2.size(0), num_base_scales, -1) # ins_score_v2_prob = ins_score_v2_prob.reshape(ins_score_v2.size(0), num_base_scales, -1) # cls_similarity = 1 - F.cosine_similarity(cls_score_v1_prob, cls_score_v2_prob, dim=-1, eps=1e-6) # ins_similarity = 1 - F.cosine_similarity(ins_score_v1_prob, ins_score_v2_prob, dim=-1, eps=1e-6) # score_similarity = 1 - F.cosine_similarity(prob_v1, prob_v2, dim=1, eps=1e-6) # return cls_similarity, ins_similarity, score_similarity def forward_train(self, img, img_metas, gt_bboxes, gt_true_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None, proposals=None, **kwargs): if self.iter_count == self.burn_in_steps1: self.roi_head.use_angle_loss = True print(f'#####iter_count1 use_angle_loss:{self.iter_count}#####') if 
self.construct_resize: self.construct_resize = False if self.iter_count == self.burn_in_steps2: if self.roi_head.use_angle_loss: self.roi_head.add_angle_pred_begin = True print(f'#####iter_count2 add_angle_pred_begin:{self.iter_count}#####') base_proposal_cfg = self.train_cfg.get('base_proposal', self.test_cfg.rpn) fine_proposal_cfg = self.train_cfg.get('fine_proposal', self.test_cfg.rpn) losses = dict() gt_points = [bbox_xyxy_to_cxcywh(b)[:, :2] for b in gt_bboxes] if self.stage == 0:
generate_proposals_0, proposals_valid_list_0 = gen_proposals_from_cfg(gt_points, base_proposal_cfg,
5
2023-11-20 07:50:12+00:00
16k
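The PointOBB record above builds its second training view by sampling each image through a rotated affine grid and applying the same rotation to every (cx, cy, w, h, a) box; keeping the two rotations consistent is what lets its Cross_View_Diff_Sim compare per-proposal scores across views. A minimal self-contained sketch of that grid_sample pattern follows; the tensor shapes, the helper name rotate_view, and the pi/6 angle are illustrative assumptions, not part of the source record.

import math
import torch
import torch.nn.functional as F

def rotate_view(imgs: torch.Tensor, boxes: torch.Tensor, rot: float):
    """Rotate an (N, C, H, W) batch and (M, 5) cx,cy,w,h,angle boxes by rot radians.

    Hedged sketch of the grid_sample-based rotation used in rotate_crop above;
    assumes all boxes belong to the first image in the batch.
    """
    n, c, h, w = imgs.shape
    cosa, sina = math.cos(rot), math.sin(rot)
    tf = imgs.new_tensor([[cosa, -sina], [sina, cosa]])
    # Rotate a normalized sampling grid about the image center, then resample.
    y, x = torch.meshgrid(torch.linspace(-1, 1, h), torch.linspace(-1, 1, w), indexing='ij')
    grid = torch.stack([x, y], -1).reshape(-1, 2).matmul(tf).view(1, h, w, 2).expand(n, -1, -1, -1)
    imgs_rot = F.grid_sample(imgs, grid, mode='bilinear', padding_mode='reflection', align_corners=True)
    # Rotate box centers about the pixel-space center; the angle channel just shifts by rot.
    ctr = boxes.new_tensor([[w / 2, h / 2]])
    xy = (boxes[:, :2] - ctr).matmul(tf.T) + ctr
    boxes_rot = torch.cat([xy, boxes[:, 2:4], boxes[:, 4:5] + rot], dim=-1)
    return imgs_rot, boxes_rot

imgs_rot, boxes_rot = rotate_view(torch.rand(1, 3, 64, 64), torch.tensor([[32.0, 32.0, 20.0, 10.0, 0.0]]), math.pi / 6)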
wangermeng2021/llm-webui
main.py
[ { "identifier": "login_huggingface", "path": "src/utils/common.py", "snippet": "def login_huggingface(token,base_model_name_dropdown):\n if base_model_name_dropdown.lower().find(\"llama\") >= 0:\n if token:\n HUGGINGFACE_HUB_TOKEN = token\n print(\"d1:\",HUGGINGFACE_HUB_T...
import pandas as pd import math import numpy as np import gc import os,requests import subprocess,threading import time import gradio as gr import os import traceback import numpy as np import glob import shutil import torch import socket from src.utils.common import login_huggingface from src.finetune.huggingface_inference import HuggingfaceInference from src.finetune.llama_cpp_inference import LlamaCppInference from src.rag.qa_with_rag import QAWithRAG from src.utils.common import read_yaml,get_first_row_from_dataset,\ get_runs_model_names_from_dir,get_hg_model_names_from_dir,get_hg_model_names_and_gguf_from_dir,validate_model_path,get_runs_models from src.utils.chat_prompts import get_model_type,get_chat_history_prompt,get_model_prompt_template from transformers.training_args import OptimizerNames from huggingface_hub import hf_hub_download from src.utils import download_model from pathlib import Path from src.finetune.qlora_trainer import QloraTrainer from src.finetune.qlora_trainer import TRAINING_STATUS from src.utils.download_huggingface_repo import download_model_wrapper,download_dataset_wrapper
13,880
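The context field of this record shows login_huggingface authenticating only when a gated Llama-family checkpoint is requested and a token is supplied. A hedged sketch of that gating, built on huggingface_hub's login() entry point; the helper name maybe_login and the example arguments are assumptions for illustration.

from huggingface_hub import login

def maybe_login(token: str, model_name: str) -> None:
    """Log in to the Hugging Face Hub only for gated model families.

    Mirrors the pattern in the record's login_huggingface helper:
    non-gated models skip authentication entirely.
    """
    if "llama" in model_name.lower() and token:
        login(token=token)

maybe_login("hf_xxx", "meta-llama/Llama-2-7b-hf")  # illustrative token and model id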
try: login_huggingface(Huggingface_hub_token,base_model_name_dropdown) except Exception as e: raise gr.Error(e) def download_hub_home_chat_model_postprocess(): return gr.update(visible=True), gr.update(visible=False) def click_download_hub_home_chat_model_btn(): return gr.update(visible=False), gr.update(visible=True), gr.update(visible=True) def click_stop_download_hub_home_chat_model_names_btn(): return gr.update(visible=True), gr.update(visible=False), gr.update(visible=False) def click_stop_download_hub_home_chat_model_names_btn(): return gr.update(visible=True), gr.update(visible=False), gr.update(visible=False) def change_home_chat_model_source_radio(home_chat_model_source_radio, hub_home_chat_model_names_dropdown): local_home_chat_model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models") if home_chat_model_source_radio == "Download From Huggingface Hub": if not hub_home_chat_model_names_dropdown: model_download_status = '<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;No model is selected.</span>' else: if validate_model_path(hub_home_chat_model_names_dropdown)[0]: model_download_status = '<span style="color:green">&nbsp;&nbsp;&nbsp;&nbsp;This model has already been downloaded to local,click load model to run.</span>' else: model_download_status = '<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;This model has not been downloaded.</span>' return gr.update(visible=True), gr.update(visible=False), gr.update( visible=False), gr.update(visible=True, value=model_download_status), gr.update( visible=True), gr.update( visible=False) else: model_download_status = "" return gr.update(visible=False), gr.update(visible=True), gr.update( visible=True), gr.update(visible=False, value=model_download_status), gr.update( visible=False), gr.update( visible=False) click_download_hub_home_chat_model_names_btn_event = download_hub_home_chat_model_names_btn.click( check_local_model_or_dataset_is_empty1, [hub_home_chat_model_names_dropdown,Huggingface_hub_token]).success( click_download_hub_home_chat_model_btn, [], [download_hub_home_chat_model_names_btn, stop_download_hub_home_chat_model_names_btn, download_hub_home_chat_model_status_markdown]).then( download_model_wrapper, [hub_home_chat_model_names_dropdown, local_home_chat_model_root_dir_textbox], download_hub_home_chat_model_status_markdown). 
\ then(download_hub_home_chat_model_postprocess, [], [download_hub_home_chat_model_names_btn, stop_download_hub_home_chat_model_names_btn]) stop_download_hub_home_chat_model_names_btn.click(click_stop_download_hub_home_chat_model_names_btn, [], [download_hub_home_chat_model_names_btn, stop_download_hub_home_chat_model_names_btn, download_hub_home_chat_model_status_markdown], cancels=[ click_download_hub_home_chat_model_names_btn_event]) home_chat_model_source_radio.change(change_home_chat_model_source_radio, [home_chat_model_source_radio, hub_home_chat_model_names_dropdown], [hub_home_chat_model_names_dropdown, local_home_chat_model_names_dropdown, refresh_local_home_chat_model_names_btn, download_hub_home_chat_model_status_markdown, download_hub_home_chat_model_names_btn, stop_download_hub_home_chat_model_names_btn], cancels=[click_download_hub_home_chat_model_names_btn_event]) def change_refresh_local_home_chat_model_names_btn(): local_home_chat_model_names = get_hg_model_names_and_gguf_from_dir(local_home_chat_model_dir,runs_model_root_dir) return gr.update(choices=local_home_chat_model_names,value = local_home_chat_model_names[0] if local_home_chat_model_names else None) refresh_local_home_chat_model_names_btn.click(change_refresh_local_home_chat_model_names_btn,[],[local_home_chat_model_names_dropdown]) def change_hub_home_chat_model_names_dropdown(hub_home_chat_model_names_dropdown): if not hub_home_chat_model_names_dropdown: return gr.update(visible=True, value='<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;No model is selected.</span>'), \ gr.update(visible=True), gr.update(visible=False) if validate_model_path(hub_home_chat_model_names_dropdown)[0]: return gr.update( visible=True, value='<span style="color:green">&nbsp;&nbsp;&nbsp;&nbsp;This model has already been downloaded to local,click load model to run.</span>'), \ gr.update(visible=True), gr.update(visible=False) else: return gr.update(visible=True, value='<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;This model has not been downloaded.</span>'), \ gr.update(visible=True), gr.update(visible=False) hub_home_chat_model_names_dropdown.change(change_hub_home_chat_model_names_dropdown, hub_home_chat_model_names_dropdown, [download_hub_home_chat_model_status_markdown, download_hub_home_chat_model_names_btn, stop_download_hub_home_chat_model_names_btn], cancels=[click_download_hub_home_chat_model_names_btn_event]) def click_load_home_chat_model_btn(home_chat_model_source_radio, hub_home_chat_model_names_dropdown, local_home_chat_model_names_dropdown, max_new_tokens_slider, temperature_slider, top_k_slider, top_p_slider, repeat_penalty_slider, chat_history_window_slider,using_4bit_quantization_checkbox,low_cpu_mem_usage_checkbox, progress=gr.Progress()): if home_chat_model_source_radio == "Download From Huggingface Hub": cur_model_name = hub_home_chat_model_names_dropdown else: cur_model_name = local_home_chat_model_names_dropdown if not validate_model_path(cur_model_name)[0]: raise gr.Error(f"Model does not exist!") global infer_model global stop_generation_status stop_generation_status = True progress(0.6) if infer_model: infer_model.free_memory() infer_model = None torch.cuda.empty_cache() yield "Loading model ..." load_model_status = 0 model_path = validate_model_path(cur_model_name)[1] if model_path.split('.')[-1] == "gguf":
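The cropped_code field above stops at the gguf extension check; the record's held-out next line is not reproduced here. As a general illustration of extension-based backend dispatch of the kind the imports (LlamaCppInference, HuggingfaceInference) suggest, a sketch that routes gguf checkpoints to llama-cpp-python and everything else to transformers; the function name load_backend and the n_ctx value are assumptions.

from llama_cpp import Llama
from transformers import AutoModelForCausalLM, AutoTokenizer

def load_backend(model_path: str):
    """Pick an inference backend from the checkpoint's file extension."""
    if model_path.split('.')[-1] == "gguf":
        # llama.cpp handles quantized gguf files directly.
        return Llama(model_path=model_path, n_ctx=2048)
    # Anything else is treated as a Hugging Face format model directory.
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    model = AutoModelForCausalLM.from_pretrained(model_path, device_map="auto")  # device_map needs accelerate
    return model, tokenizer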
# os.environ['HTTP_PROXY'] = 'http://127.0.0.1:8889' # os.environ['HTTPS_PROXY'] = 'http://127.0.0.1:8889' LOCAL_HOST_IP = "0.0.0.0" TENSORBOARD_URL = "http://" + LOCAL_HOST_IP + ":6006/" INIT_DATASET_NAME = "test_python_code_instructions_5000_rows" RAG_DATA_LIST_DROPDOWN = "" TEXT_SPLITTER_DROPDOWN = "" CHUNK_SIZE_SLIDER = 0 CHUNK_OVERLAP_SLIDER = -1 SEPARATORS_TEXTBOX = "" EMBEDDING_MODEL_SOURCE_RADIO = "" HUB_EMBEDDING_MODEL_NAMES_DROPDOWN = "" LOCAL_EMBEDDING_MODEL_NAMES_DROPDOWN = "" CHAT_MODEL_SOURCE_RADIO = "" HUB_CHAT_MODEL_NAMES_DROPDOWN = "" LOCAL_CHAT_MODEL_NAMES_DROPDOWN = "" SEARCH_TOP_K_SLIDER = "" SEARCH_SCORE_THRESHOLD_SLIDER = "" training_ret_val = -1 error_msg = "" current_running_model_name = "" infer_model = None stop_generation_status = False chatbot_history=[] chatbot_height = 500 rag_chatbot_history=[] rag_stop_generation_status = False qa_with_rag = QAWithRAG() train_param_config = {} train_param_config["dataset"]={} train_param_config["model"]={} train_param_config["training"]={} model_zoo_config = {} transformer_optimizer_list = [] model_context_window = 0 init_train_file_path = None init_val_file_path = None INIT_PREFIX1 = "" INIT_PREFIX2 = "" INIT_PREFIX3 = "" INIT_PREFIX4 = "" INIT_COL1_TEXT = "" INIT_COL2_TEXT = "" INIT_COL3_TEXT = "" INIT_COL4_TEXT = "" col_names = [] DATASET_FIRST_ROW = None local_model_list = "" local_model_root_dir = "" base_model_names = [] training_base_model_names = [] embedding_model_names = [] base_model_context_window = [] local_dataset_list = [] local_dataset_root_dir = "" def get_local_embedding_model_list(): local_model_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "rag", "embedding_models") local_model_root_files = os.listdir(local_model_root_dir) local_model_list = [] for model_dir in local_model_root_files: if os.path.isdir(os.path.join(local_model_root_dir, model_dir)): local_model_list.append(model_dir) return local_model_list,local_model_root_dir def get_local_model_list(): local_model_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models") local_model_root_files = os.listdir(local_model_root_dir) local_model_list = [] for model_dir in local_model_root_files: if os.path.isdir(os.path.join(local_model_root_dir, model_dir)): local_model_list.append(model_dir) return local_model_list,local_model_root_dir def get_local_dataset_list(): local_dataset_list = [] local_dataset_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "datasets") matched_dataset_file_path_list = glob.glob(os.path.join(local_dataset_root_dir,"**","dataset_infos.json"),recursive=False) for matched_file_path in matched_dataset_file_path_list: matched_pos1 = matched_file_path.rfind("datasets") matched_pos2 = matched_file_path.rfind("dataset_infos.json") local_dataset_list.append(matched_file_path[matched_pos1 + 9:matched_pos2-1]) matched_dataset_file_path_list = glob.glob(os.path.join(local_dataset_root_dir,"**","dataset_dict.json"),recursive=False) for matched_file_path in matched_dataset_file_path_list: matched_pos1 = matched_file_path.rfind("datasets") matched_pos2 = matched_file_path.rfind("dataset_dict.json") local_dataset_list.append(matched_file_path[matched_pos1 + 9:matched_pos2-1]) return local_dataset_list,local_dataset_root_dir def start_tensorboard_server(): try: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((LOCAL_HOST_IP, 6006)) s.close() except Exception as e: tensorboard_cmd = f"tensorboard --logdir {os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs')} 
--reload_multifile True" tensorboard_proc = subprocess.Popen(tensorboard_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, close_fds=True) # bufsize=0, close_fds=True def init(): global config_dict,transformer_optimizer_list,model_context_window,init_train_file_path,init_val_file_path global INIT_PREFIX1,INIT_COL1_TEXT,INIT_PREFIX2,INIT_COL2_TEXT,INIT_PREFIX3,INIT_COL3_TEXT,INIT_PREFIX4,INIT_COL4_TEXT,col_names,DATASET_FIRST_ROW global local_model_list,local_model_root_dir global base_model_names,base_model_context_window,embedding_model_names,training_base_model_names global local_dataset_list, local_dataset_root_dir start_tensorboard_server() model_zoo_config = read_yaml(os.path.join(os.path.dirname(os.path.abspath(__file__)),"config","model_zoo.yaml")) transformer_optimizer_list = list(vars(OptimizerNames)["_value2member_map_"].keys()) #get dynamic context window from selected model model_context_window = [2048,1024,512] init_train_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "datasets", INIT_DATASET_NAME) DATASET_FIRST_ROW,split_list = get_first_row_from_dataset(init_train_file_path) col_names = list(DATASET_FIRST_ROW) col_names.insert(0,"") INIT_PREFIX1 = "<s>[INST] " INIT_PREFIX2 = "here are the inputs " INIT_PREFIX3 = " [/INST]" INIT_PREFIX4 = "</s>" INIT_COL1_TEXT = str(DATASET_FIRST_ROW[col_names[1]]) INIT_COL2_TEXT = str(DATASET_FIRST_ROW[col_names[2]]) INIT_COL3_TEXT = str(DATASET_FIRST_ROW[col_names[3]]) INIT_COL4_TEXT = "" local_model_list,local_model_root_dir = get_local_model_list() base_model_names = [model_name for model_name in model_zoo_config["model_list"]] training_base_model_names = [model_name for model_name in base_model_names if not model_name.endswith(".gguf")] # base_model_context_window = [model_name[1] for model_name in model_zoo_config["model_list"]] embedding_model_names = [model_name for model_name in model_zoo_config["embedding_model_list"]] local_dataset_list, local_dataset_root_dir = get_local_dataset_list() with gr.Blocks(title="FINETUNE",css="#vertical_center_align_markdown { position:absolute; top:30%;background-color:white;} .white_background {background-color: #ffffff} .none_border {border: none;border-collapse:collapse;}") as demo: init() local_model_root_dir_textbox = gr.Textbox(label="", value=local_model_root_dir, visible=False) local_dataset_root_dir_textbox = gr.Textbox(label="",value=local_dataset_root_dir, visible=False) local_embedding_model_root_dir_textbox = gr.Textbox(label="", value=os.path.join(os.path.dirname(os.path.abspath(__file__)), "rag", "embedding_models"), visible=False) local_chat_model_root_dir_textbox = gr.Textbox(label="", value=local_model_root_dir, visible=False) local_home_chat_model_root_dir_textbox = gr.Textbox(label="", value=local_model_root_dir, visible=False) session_state = gr.State(value={}) # html = gr.HTML("<p align='center';>llm-web-ui</p>",elem_id="header") with gr.Tab("Home"): with gr.Row(): # with gr.Column(scale=4, min_width=1): with gr.Group(): gr.Markdown("## &nbsp;ChatBot", elem_classes="white_background") with gr.Group(): gr.Markdown("### &nbsp;&nbsp;&nbsp;&nbsp;Chat Model", elem_classes="white_background") local_home_chat_model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models") runs_model_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "runs") local_home_chat_model_names = get_hg_model_names_and_gguf_from_dir(local_home_chat_model_dir, runs_model_root_dir) home_chat_model_source_radio_choices = ["Download From 
Huggingface Hub", f"From Local Dir(hg format:{local_home_chat_model_dir})"] home_chat_model_source_radio = gr.Radio(home_chat_model_source_radio_choices, label="Chat Model source", show_label=False, value=home_chat_model_source_radio_choices[0], interactive=True) with gr.Row(): hub_home_chat_model_names_dropdown = gr.Dropdown(base_model_names, label=f"Chat Model", show_label=False, allow_custom_value=True, value=base_model_names[ 0] if base_model_names else None, interactive=True, scale=4, min_width=1) local_home_chat_model_names_dropdown = gr.Dropdown(local_home_chat_model_names, label=f"Chat Model", show_label=False, value=local_home_chat_model_names[ 0] if local_home_chat_model_names else None, interactive=True, scale=4, min_width=1, visible=False) download_hub_home_chat_model_names_btn = gr.Button("Download", scale=1) stop_download_hub_home_chat_model_names_btn = gr.Button("Stop", scale=1, visible=False) refresh_local_home_chat_model_names_btn = gr.Button("Refresh", scale=1, visible=False) load_home_chat_model_btn = gr.Button("Load Model", scale=1, visible=True) using_4bit_quantization_checkbox = gr.Checkbox(True, label="Using 4-bit quantization", interactive=True, visible=True, info="Less memory but slower", scale=1 ) if validate_model_path(base_model_names[0])[0]: download_hub_home_chat_model_status_markdown = gr.Markdown( '<span style="color:green">&nbsp;&nbsp;&nbsp;&nbsp;This model has already been downloaded to local,click load model to run.</span>') else: download_hub_home_chat_model_status_markdown = gr.Markdown( '<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;This model has not been downloaded.</span>') # home_chat_model_running_status_markdown = gr.Markdown( # '<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;This model has not been downloaded.</span>') with gr.Row(): chatbot = gr.Chatbot(value=[],bubble_full_width=False,rtl=False,layout="panel",height=chatbot_height, avatar_images=((os.path.join(os.path.abspath(''),"pics", "user1.png")), (os.path.join(os.path.abspath(''),"pics", "bot4.png"))), ) with gr.Row(): input_txtbox = gr.Textbox( show_label=False,autofocus=True, placeholder="Enter text and press enter",scale=3 ) generate_btn = gr.Button("Generate", scale=1) stop_btn = gr.Button("Stop", scale=1) # clear_btn = gr.Button("Clear",scale=1) with gr.Tab("Fine-Tuning"): with gr.Tabs() as tensorboard_tab: with gr.TabItem("Training", id=0): with gr.Row(): with gr.Column(scale=1, min_width=1): with gr.Group(): gr.Markdown("## &nbsp;1.Training", elem_classes="white_background") with gr.Group(): gr.Markdown("### &nbsp;1).Model", elem_classes="white_background") with gr.Group(): # gr.Markdown("<br> &nbsp;&nbsp;&nbsp; Base Model") base_model_source_radio_choices = ["Download From Huggingface Hub", f"From Local Dir(hg format:{local_model_root_dir})"] base_model_source_radio = gr.Radio(base_model_source_radio_choices, label="Base Model", value=base_model_source_radio_choices[0], interactive=True) with gr.Row(elem_classes="white_background"): base_model_name_dropdown = gr.Dropdown(training_base_model_names, label="Model Name", value=training_base_model_names[0] if training_base_model_names else None, interactive=True, visible=True, scale=5, allow_custom_value=True) download_local_model_btn = gr.Button("Download", scale=1, visible=True) stop_download_local_model_btn = gr.Button("Stop", scale=1, visible=False) # model_download_status = gr.Markdown("<div id='vertical_center_align_markdown'><p style='text-align: center;'>Not downloaded</p></div>", 
elem_classes="white_background",scale=1,full_width=True,visible=False) if validate_model_path(training_base_model_names[0])[0]: download_model_status_markdown = gr.Markdown('<span style="color:green">&nbsp;&nbsp;&nbsp;&nbsp;This model has already been downloaded to local.</span>') else: download_model_status_markdown = gr.Markdown('<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;This model has not been downloaded.</span>') with gr.Row(): # local_home_chat_model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models") # runs_model_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "runs") # local_model_list = get_hg_model_names_and_gguf_from_dir(local_home_chat_model_dir,runs_model_root_dir) local_model_list = get_hg_model_names_from_dir(os.path.dirname(os.path.abspath(__file__)), "models") local_model_dropdown = gr.Dropdown(local_model_list, label="Local Model", info="", value=local_model_list[0] if len(local_model_list) > 0 else None, interactive=True, elem_classes="white_background", scale=5, visible=False) refresh_local_model_list_btn = gr.Button("Refresh", scale=1, visible=False) fine_tuning_type_dropdown = gr.Dropdown(["QLoRA", "LoRA"], label="Fine-Tuning Type", info="", value="QLoRA", interactive=True) with gr.Group(): with gr.Row(elem_classes="white_background"): # gr.Markdown("### &nbsp;&nbsp;&nbsp; LoRA Config", elem_classes="white_background") lora_r_list = [str(ri) for ri in range(8, 65, 8)] lora_r_slider = gr.Slider(8, 64, value=8, step=8, label="lora_r", interactive=True) # lora_r_dropdown = gr.Dropdown(lora_r_list,label="lora_r", value=lora_r_list[0],interactive=True,allow_custom_value=True) lora_alpha_slider = gr.Slider(8, 96, value=32, step=8, label="lora_alpha", interactive=True) # lora_alpha_list = [str(ri) for ri in range(8, 97, 8)] # lora_alpha_dropdown = gr.Dropdown(lora_alpha_list,label="lora_alpha", value=lora_alpha_list[3],interactive=True,allow_custom_value=True) with gr.Row(elem_classes="white_background"): lora_dropout_slider = gr.Slider(0, 1, value=0.05, step=0.01, label="lora_dropout", interactive=True) lora_bias_dropdown = gr.Dropdown(["none", "all", "lora_only"], label="lora_bias", info="", value="none", interactive=True) with gr.Group(): gr.Markdown("### &nbsp;2).Dataset",elem_classes="white_background") dataset_source_radio_choices = ["Download From Huggingface Hub", f"From Local HG Dataset In {local_dataset_root_dir})"] dataset_source_radio = gr.Radio(dataset_source_radio_choices, label="Dataset Source", value=dataset_source_radio_choices[1], interactive=True) with gr.Row(equal_height=True): hg_dataset_path_textbox = gr.Textbox(label="Dataset Name:",elem_classes="none_border",visible=False, interactive=True, scale=4, value="iamtarun/python_code_instructions_18k_alpaca") download_local_dataset_btn = gr.Button("Download", scale=1, visible=False) stop_download_local_dataset_btn = gr.Button("Stop", scale=1, visible=False) download_dataset_status_markdown = gr.Markdown('') with gr.Row(): hg_train_dataset_dropdown = gr.Dropdown(["train"], label="Train set", info="", interactive=False,visible=False, elem_classes="white_background", scale=1,value="train") hg_val_dataset_dropdown = gr.Dropdown([], label="Val set", info="", interactive=False,visible=False, elem_classes="white_background", scale=1) with gr.Row(): local_dataset_list.pop( local_dataset_list.index(INIT_DATASET_NAME)) local_dataset_list.insert(0, INIT_DATASET_NAME) local_train_path_dataset_dropdown = gr.Dropdown(local_dataset_list, label="Train Dataset", info="", 
value=local_dataset_list[0] if len(local_dataset_list)>0 else None, interactive=True, elem_classes="white_background", scale=5, visible=True) refresh_local_train_path_dataset_list_btn = gr.Button("Refresh", scale=1, visible=True) with gr.Row(): local_train_dataset_dropdown = gr.Dropdown(["train"], label="Train set", info="", interactive=True, elem_classes="white_background", scale=1,value="train",visible=True) local_val_dataset_dropdown = gr.Dropdown([], label="Val set", info="", interactive=True, elem_classes="white_background", scale=1,visible=True) with gr.Group(elem_classes="white_background"): # gr.Markdown("<h4><br> &nbsp;&nbsp;Prompt Template: (Prefix1 + ColumnName1 + Prefix2 + ColumnName2)</h4>",elem_classes="white_background") gr.Markdown("<br> &nbsp;&nbsp;&nbsp;&nbsp;**Prompt Template: (Prefix1+ColumnName1+Prefix2+ColumnName2+Prefix3+ColumnName3+Prefix4+ColumnName4)**",elem_classes="white_background") gr.Markdown( "<span> &nbsp;&nbsp;&nbsp;&nbsp;**Note**:&nbsp;&nbsp;Llama2/Mistral Chat Template:<s\>[INST] instruction+input [/INST] output</s\> </span>",elem_classes="white_background") # using_llama2_chat_template_checkbox = gr.Checkbox(True, label="Using Llama2/Mistral chat template",interactive=True,visible=False) with gr.Row(elem_classes="white_background"): # prompt_template prefix1_textbox = gr.Textbox(label="Prefix1:",value=INIT_PREFIX1,lines=2,interactive=True,elem_classes="white_background") datatset_col1_dropdown = gr.Dropdown(col_names, label="ColumnName1:", info="",value=col_names[1],interactive=True,elem_classes="white_background") prefix2_textbox = gr.Textbox(label="Prefix2:",value=INIT_PREFIX2,lines=2,interactive=True,elem_classes="white_background") datatset_col2_dropdown = gr.Dropdown(col_names, label="ColumnName2:", info="",value=col_names[2],interactive=True,elem_classes="white_background") with gr.Row(elem_classes="white_background"): prefix3_textbox = gr.Textbox(label="Prefix3:",value=INIT_PREFIX3,lines=2,interactive=True,elem_classes="white_background") datatset_col3_dropdown = gr.Dropdown(col_names, label="ColumnName3:", info="",value=col_names[3],interactive=True,elem_classes="white_background") prefix4_textbox = gr.Textbox(label="Prefix4:",value=INIT_PREFIX4,lines=2,interactive=True,elem_classes="white_background") datatset_col4_dropdown = gr.Dropdown(col_names, label="ColumnName4:", info="",value=col_names[0],interactive=True,elem_classes="white_background") # print("") prompt_sample = INIT_PREFIX1 + INIT_COL1_TEXT + INIT_PREFIX2 + INIT_COL2_TEXT + INIT_PREFIX3 + INIT_COL3_TEXT + INIT_PREFIX4 + INIT_COL4_TEXT prompt_sample_textbox = gr.Textbox(label="Prompt Sample:",interactive=False,value=prompt_sample,lines=4) max_length_dropdown = gr.Dropdown(["Model Max Length"]+model_context_window, label="Max Length",value="Model Max Length", interactive=True,allow_custom_value=True) with gr.Group(): gr.Markdown("### &nbsp;3).Training Arguments",elem_classes="white_background") with gr.Row(elem_classes="white_background"): epochs_slider = gr.Slider(1, 100, value=10, step=1, label="Epochs", interactive=True) # epochs_dropdown = gr.Dropdown([1]+[bi for bi in range(10,101,10)], label="Epochs",value=1, interactive=True,allow_custom_value=True) batch_size_list = [1,2,3]+[bi for bi in range(4,32+1,4)] batch_size_slider = gr.Slider(1, 100, value=1, step=1, label="Batch Size", interactive=True) # batch_size_dropdown = gr.Dropdown(batch_size_list,label="Batch Size", info="",value=batch_size_list[0],interactive=True,allow_custom_value=True) # learning_rate_textbox = 
gr.Textbox(label="Learning Rate", value=2e-4,interactive=True) with gr.Row(elem_classes="white_background"): learning_rate_slider = gr.Slider(0, 0.01, value=2e-4, step=0.0001, label="Learning Rate", interactive=True) warmup_steps_slider = gr.Slider(0, 400, value=100, step=10, label="Warmup Steps", interactive=True) with gr.Row(elem_classes="white_background"): optimizer_dropdown = gr.Dropdown(transformer_optimizer_list, label="Optimizer", info="", value=transformer_optimizer_list[1], interactive=True) lr_scheduler_list = ["linear","cosine","cosine_with_hard_restarts","polynomial_decay","constant","constant_with_warmup","inverse_sqrt","reduce_on_plateau"] lr_scheduler_type_dropdown = gr.Dropdown(lr_scheduler_list, label="LR Scheduler Type", info="", value=lr_scheduler_list[0], interactive=True) with gr.Row(elem_classes="white_background"): early_stopping_patience_slider = gr.Slider(0, 50+1, value=0, step=5, label="Early Stopping Patience", interactive=True) gradient_accumulation_steps_slider = gr.Slider(1, 50, value=1, step=1, label="Gradient Accumulation Steps") with gr.Row(elem_classes="white_background"): eval_steps_slider = gr.Slider(0, 1000, value=100, step=100, label="eval_steps", interactive=True) gradient_checkpointing_checkbox = gr.Checkbox(True,label="Gradient Checkpointing",interactive=True) train_btn = gr.Button("Start Training") with gr.Column(scale=1, min_width=1): with gr.Group(): gr.Markdown("## &nbsp;2.Test",elem_classes="white_background") training_runs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs') run_names = os.listdir(training_runs_dir) run_names.sort(key=lambda file:os.path.getmtime(os.path.join(training_runs_dir,file))) runs_output_model = [] for run_name in run_names: run_name_dir = os.path.join(training_runs_dir,run_name) run_output_model = os.path.join(run_name_dir,"output_model") if os.path.exists(run_output_model): run_output_model_names = os.listdir(run_output_model) for run_output_model_name in run_output_model_names: if run_output_model_name.find("merged_")>=0: runs_output_model.append(os.path.join(run_name,"output_model",run_output_model_name, "ori")) runs_output_model = runs_output_model[::-1] runs_output_model_dropdown = gr.Dropdown(runs_output_model, label="runs_output_model", value=runs_output_model[0] if runs_output_model else None, interactive=True) gr.Markdown("") gr.Markdown( "<span> &nbsp;&nbsp;&nbsp;&nbsp;**Note**:&nbsp;&nbsp;Llama2/Mistral Chat Template:<s\>[INST] instruction+input [/INST] output</s\> </span>", elem_classes="white_background") with gr.Row(): test_input_textbox = gr.Textbox(label="Input:", interactive=True, value="", lines=4, scale=4) generate_text_btn = gr.Button("Generate",scale=1) finetune_test_using_4bit_quantization_checkbox = gr.Checkbox(True, label="Using 4-bit quantization", interactive=True, visible=True, info="Less memory but slower", scale=1 ) # test_prompt = gr.Textbox(label="Prompt:", interactive=False, lines=2, scale=1) test_output = gr.Textbox(label="Output:", interactive=False,lines=4, scale=1) # def change_test_input_textbox(test_prefix1_textbox,test_input_textbox,test_prefix2_textbox): # return gr.update(value=test_prefix1_textbox+test_input_textbox+test_prefix2_textbox) # test_input_textbox.change(change_test_input_textbox,[test_prefix1_textbox,test_input_textbox,test_prefix2_textbox],test_prompt) with gr.Group(): gr.Markdown("## &nbsp;3.Quantization",elem_classes="white_background") with gr.Row(): quantization_type_list = ["gguf"] quantization_type_dropdown = 
gr.Dropdown(quantization_type_list, label="Quantization Type",value=quantization_type_list[0], interactive=True,scale=3) local_quantization_dataset_dropdown = gr.Dropdown(local_dataset_list, label="Dataset for quantization", value=local_dataset_list[0] if len( local_dataset_list) > 0 else None, interactive=True, elem_classes="white_background", scale=7, visible=False) refresh_local_quantization_dataset_btn = gr.Button("Refresh", scale=2, visible=False) def click_refresh_local_quantization_dataset_btn(): local_dataset_list, _ = get_local_dataset_list() return gr.update(choices=local_dataset_list, value=local_dataset_list[0] if len(local_dataset_list) > 0 else "") refresh_local_quantization_dataset_btn.click(click_refresh_local_quantization_dataset_btn,[],local_quantization_dataset_dropdown) with gr.Row(): training_runs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs') run_names = os.listdir(training_runs_dir) run_names.sort(key=lambda file: os.path.getmtime(os.path.join(training_runs_dir, file))) runs_output_model = [] for run_name in run_names: run_name_dir = os.path.join(training_runs_dir, run_name) run_output_model = os.path.join(run_name_dir, "output_model") if os.path.exists(run_output_model): run_output_model_names = os.listdir(run_output_model) for run_output_model_name in run_output_model_names: if run_output_model_name.find("merged_") >= 0: runs_output_model.append( os.path.join(run_name, "output_model", run_output_model_name, "ori")) runs_output_model = runs_output_model[::-1] quantization_runs_output_model_dropdown = gr.Dropdown(runs_output_model, label="runs_output_model", value=runs_output_model[ 0] if runs_output_model else None, interactive=True, scale=6) quantize_btn = gr.Button("Quantize", scale=1,visible=False) if runs_output_model: model_name = runs_output_model[0].split(os.sep)[-2].split('_')[-1] quantized_model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs', os.sep.join(runs_output_model[0].split(os.sep)[0:-1]), "quantized_" + quantization_type_list[0] + "_" + model_name) if not os.path.exists(quantized_model_dir): os.makedirs(quantized_model_dir) quantization_logging_markdown = gr.Markdown("") gguf_quantization_markdown0 = gr.Markdown("### &nbsp;&nbsp;&nbsp;&nbsp;GGUF Quantization Instruction:", elem_classes="white_background", visible=True) gguf_quantization_markdown1 = gr.Markdown('''&nbsp;&nbsp;&nbsp;&nbsp;1.Follow the instructions in the llama.cpp to generate a GGUF:[https://github.com/ggerganov/llama.cpp#prepare-data--run](https://github.com/ggerganov/llama.cpp#prepare-data--run),<span style="color:red">&nbsp;&nbsp;Q4_K_M is recommend</span>''',visible=True) if runs_output_model: gguf_quantization_markdown2 = gr.Markdown(f"&nbsp;&nbsp;&nbsp;&nbsp;2.Convert {runs_output_model[0]} to gguf model",visible=True) else: gguf_quantization_markdown2 = gr.Markdown( f"", visible=True) gguf_quantization_markdown3 = gr.Markdown(f"&nbsp;&nbsp;&nbsp;&nbsp;3.Deploy gguf model", visible=False) else: quantization_logging_markdown = gr.Markdown("") gguf_quantization_markdown0 = gr.Markdown("### &nbsp;&nbsp;&nbsp;&nbsp;GGUF Quantization Instruction:", elem_classes="white_background", visible=True) gguf_quantization_markdown1 = gr.Markdown('''''',visible=True) gguf_quantization_markdown2 = gr.Markdown(f"",visible=True) gguf_quantization_markdown3 = gr.Markdown(f"", visible=True) with gr.Group(visible=False): gr.Markdown("## &nbsp;4.Deploy",elem_classes="white_background") with gr.Row(): deployment_framework_dropdown = 
gr.Dropdown(["TGI","llama-cpp-python"], label="Deployment Framework",value="TGI", interactive=True) with gr.Row(): training_runs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs') run_names = os.listdir(training_runs_dir) run_names.sort(key=lambda file: os.path.getmtime(os.path.join(training_runs_dir, file))) # ori_model_runs_output_model = [] tgi_model_format_runs_output_model = [] gguf_model_format_runs_output_model = [] for run_name in run_names: run_name_dir = os.path.join(training_runs_dir, run_name) run_output_model = os.path.join(run_name_dir, "output_model") if os.path.exists(run_output_model): run_output_model_names = os.listdir(run_output_model) for run_output_model_name in run_output_model_names: model_bin_path = os.path.exists( os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs', run_name, "output_model", run_output_model_name, "ori", "pytorch_model.bin")) if run_output_model_name.find("merged_") >= 0 and model_bin_path: tgi_model_format_runs_output_model.append( os.path.join(run_name, "output_model", run_output_model_name, "ori")) gptq_model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs',run_name, "output_model", run_output_model_name, "quantized_gptq_"+run_output_model_name.split('_')[-1], "pytorch_model.bin") if os.path.exists(gptq_model_path): tgi_model_format_runs_output_model.append(os.path.join(run_name, "output_model", run_output_model_name, "quantized_gptq_"+run_output_model_name.split('_')[-1])) gguf_model_dir = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'runs', run_name, "output_model", run_output_model_name, "quantized_gguf_" + run_output_model_name.split('_')[-1]) if os.path.exists(gguf_model_dir): gguf_model_names = os.listdir(gguf_model_dir) for gguf_model_name in gguf_model_names: if gguf_model_name.split('.')[-1] == "gguf": gguf_model_format_runs_output_model.append( os.path.join(run_name, "output_model", run_output_model_name, "quantized_gguf_" + run_output_model_name.split('_')[-1], gguf_model_name)) tgi_model_format_runs_output_model = tgi_model_format_runs_output_model[::-1] gguf_model_format_runs_output_model = gguf_model_format_runs_output_model[::-1] deployment_runs_output_model_dropdown = gr.Dropdown(tgi_model_format_runs_output_model, label="runs_output_model", value=tgi_model_format_runs_output_model[ 0] if tgi_model_format_runs_output_model else None, interactive=True,scale=6) refresh_deployment_runs_output_model_btn = gr.Button("Refresh", scale=1, visible=True) if tgi_model_format_runs_output_model: model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs', os.path.dirname(tgi_model_format_runs_output_model[0])) model_name = os.path.basename(tgi_model_format_runs_output_model[0]) if model_name.rfind("quantized_gptq_") >= 0: run_server_value = f'''docker run --gpus all --shm-size 1g -p 8080:80 -v {model_dir}:/data ghcr.io/huggingface/text-generation-inference:latest --model-id /data/{model_name} --quantize gptq''' else: run_server_value = f'''docker run --gpus all --shm-size 1g -p 8080:80 -v {model_dir}:/data ghcr.io/huggingface/text-generation-inference:latest --model-id /data/{model_name}''' run_server_script_textbox = gr.Textbox(label="Run Server:", interactive=False,lines=2, scale=1,value=run_server_value) run_client_value = '''Command-Line Interface(CLI):\ncurl 127.0.0.1:8080/generate -X POST -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":20}}' -H 'Content-Type: application/json'\n\nPython:\nfrom huggingface_hub import InferenceClient 
\nclient = InferenceClient(model="http://127.0.0.1:8080")\noutput = client.text_generation(prompt="What is Deep Learning?",max_new_tokens=512) ''' run_client_script_textbox = gr.Textbox(label="Run Client:", interactive=False, lines=6,scale=1,value=run_client_value) else: run_server_script_textbox = gr.Textbox(label="Run Server:", interactive=False,lines=2, scale=1,value="") run_client_script_textbox = gr.Textbox(label="Run Client:", interactive=False, lines=6, scale=1, value="") # deploy_llm_code = gr.Code(code_str, language="shell", lines=5, label="Install Requirements:") install_requirements_value = ''' ### &nbsp;&nbsp; 1.install docker ### &nbsp;&nbsp; 2.Install NVIDIA Container Toolkit <h4> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2.1 Configure the repository: </h4> <p> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg \ && curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list | \ sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \ sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list \ && \ sudo apt-get update </p> <h4> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2.2 Install the NVIDIA Container Toolkit packages: </h4> <p>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; sudo apt-get install -y nvidia-container-toolkit </p> ''' with gr.Accordion("Install Requirements",open=False) as install_requirements_accordion: install_requirements_markdown = gr.Markdown(install_requirements_value) run_llama_cpp_python_code = gr.Code("", language="python", lines=10, label="run_model_using_llama_cpp_python.py",visible=False) # run_script_textbox = gr.Textbox(label="Install Requirements:", interactive=False, scale=1,value=install_requirements_value) #dependencies with gr.TabItem("Tensorboard", id=1) as fdddd: # training_log_markdown = gr.Markdown('',every=mytestfun) with gr.Row(): # training_log_textbox = gr.Textbox(label="logging:",value="", interactive=True, lines=2, scale=1) with gr.Group(): training_log_markdown = gr.Markdown('') stop_training_btn = gr.Button("Stop Training") training_runs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs') run_names = os.listdir(training_runs_dir) run_names = [run_name for run_name in run_names if os.path.isdir(os.path.join(training_runs_dir,run_name))] run_names.sort(key=lambda f: os.path.getmtime(os.path.join(training_runs_dir, f))) # print("dddddddd:",run_names) with gr.Group(): # with gr.Row(): training_runs_dropdown = gr.Dropdown(run_names, label="Training Runs",value=run_names[0] if run_names else None, interactive=True, scale=1) delete_text_btn = gr.Button("Delete Run", scale=1) iframe = f'<iframe src={TENSORBOARD_URL} style="border:none;height:1024px;width:100%">' tensorboard_html = gr.HTML(iframe) with gr.Tab("RAG"): with gr.Row(): with gr.Column(scale=4, min_width=1): with gr.Group(): gr.Markdown("## &nbsp;ChatBot", elem_classes="white_background") rag_data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'rag', 'data') matched_file_list = [] supported_doc_type = ["*.pdf","*.txt","*.docx"] for doc_type in supported_doc_type: matched_file_list += glob.glob(os.path.join(rag_data_dir, doc_type), recursive=False) matched_file_list.sort(key=lambda file: os.path.getmtime(file),reverse=True) matched_file_name_list = [] for matched_file in matched_file_list: 
matched_file_name_list.append(os.path.basename(matched_file)) # chat_data_source_radio_choices = ["Chat With Document", # f"Chat With Image"] gr.Markdown("### &nbsp;Chat With Document", elem_classes="white_background") # chat_data_source_radio = gr.Radio(chat_data_source_radio_choices, # label="", # value=chat_data_source_radio_choices[0], # interactive=True) with gr.Row(): rag_data_list_dropdown = gr.Dropdown(matched_file_name_list, label=f"Local Documents In {rag_data_dir}", value=matched_file_name_list[0] if matched_file_name_list else None, interactive=True,scale=4, min_width=1) refresh_rag_data_list_btn = gr.Button("Refresh", scale=1, min_width=1) # if not current_running_model_name: # model_running_status_markdown = gr.Markdown(f"<span style='color:red'>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;No modelis running!</span>") # else: # model_running_status_markdown = gr.Markdown(f"<span style='color:green'>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Model is runing:{current_running_model_name}.</span>") def click_refresh_rag_data_list_btn(): rag_data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'rag', 'data') matched_file_list = [] supported_doc_type = ["*.pdf", "*.txt", "*.docx"] for doc_type in supported_doc_type: matched_file_list += glob.glob(os.path.join(rag_data_dir, doc_type), recursive=False) matched_file_list.sort(key=lambda file: os.path.getmtime(file), reverse=True) matched_file_name_list = [] for matched_file in matched_file_list: matched_file_name_list.append(os.path.basename(matched_file)) return gr.update(choices=matched_file_name_list,value=matched_file_name_list[0] if matched_file_name_list else None) refresh_rag_data_list_btn.click(click_refresh_rag_data_list_btn,[],rag_data_list_dropdown) # def update_model_running_status(): # return gr.update(value=f"<span style='color:red'>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;{current_running_model_name} is runing!.</span>") # # load_model_btn.click(click_load_model_btn,model_list_dropdown,[model_list_dropdown]).success(update_model_running_status,[],model_running_status_markdown) with gr.Row(): rag_chatbot = gr.Chatbot(value=[],bubble_full_width=False,rtl=False,layout="panel",height=chatbot_height, avatar_images=((os.path.join(os.path.abspath(''),"pics", "user1.png")), (os.path.join(os.path.abspath(''),"pics", "bot4.png"))), ) with gr.Row(): rag_input_txtbox = gr.Textbox( show_label=False,autofocus=True, placeholder="Enter text and press enter",scale=6) rag_generate_btn = gr.Button("Generate", scale=1) rag_stop_btn = gr.Button("Stop", scale=1) # rag_clear_btn = gr.Button("Clear", scale=1) rag_model_running_status_markdown = gr.Markdown( f"### &nbsp;&nbsp;Retrieved Document Chunks",visible=True) # retrieved_document_chunks_markdown = gr.Markdown( # f"### &nbsp;&nbsp;Retrieved Document Chunks",visible=True) retrieved_document_chunks_dataframe = gr.Dataframe( headers=["ID", "Chunk"], datatype=["str", "str"], show_label=False, value=None ) with gr.Column(scale=4, min_width=1): with gr.Group(): gr.Markdown("## &nbsp;Setting", elem_classes="white_background") with gr.Group(): with gr.Group(): gr.Markdown("### &nbsp;&nbsp;1.Chunking", elem_classes="white_background") with gr.Row(): text_splitter_dropdown = gr.Dropdown(["RecursiveCharacterTextSplitter"], label=f"Text Splitter", value="RecursiveCharacterTextSplitter", interactive=True, scale=1, min_width=1) with gr.Row(): chunk_size_slider = gr.Slider(32, 1024, value=256, step=32, label="Chunk Size", interactive=True, scale=1) chunk_overlap_slider = gr.Slider(0, 500, value=20, step=10, 
label="Chunk Overlap", interactive=True) Separators_textbox = gr.Textbox(label="Separators", value='''["\n\n", "\n", ".", " ", ""]''', interactive=True,visible=False) with gr.Group(): gr.Markdown("### &nbsp;&nbsp;2.Vector Store Retriever", elem_classes="white_background") local_embedding_model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),"rag","embedding_models") local_embedding_model_names = get_hg_model_names_from_dir(local_embedding_model_dir,"embedding_models") embedding_model_source_radio_choices = ["Download From Huggingface Hub", f"From Local Dir(hg format:{local_embedding_model_dir})"] embedding_model_source_radio = gr.Radio(embedding_model_source_radio_choices, label="Embedding Model Source", value=embedding_model_source_radio_choices[0], interactive=True) with gr.Row(): hub_embedding_model_names_dropdown = gr.Dropdown(embedding_model_names, label=f"",show_label=False, value=embedding_model_names[0] if embedding_model_names else None, interactive=True, scale=4, min_width=1) download_hub_embedding_model_names_btn = gr.Button("Download", scale=1) stop_download_hub_embedding_model_names_btn = gr.Button("Stop", scale=1, visible=False) local_embedding_model_names_dropdown = gr.Dropdown(local_embedding_model_names, label=f"Embedding Model",show_label=False, value=local_embedding_model_names[0] if local_embedding_model_names else None, interactive=True, scale=4, min_width=1,visible=False) refresh_local_embedding_model_names_btn = gr.Button("Refresh", scale=1,visible=False) # model_config_path1 = os.path.join(local_embedding_model_dir, # embedding_model_names[0], "pytorch_model.bin") # model_config_path2 = os.path.join(local_embedding_model_dir, # embedding_model_names[0], "model.safetensors") model_config_path = os.path.join(local_embedding_model_dir, embedding_model_names[0], "config.json") if os.path.exists(model_config_path): download_hub_embedding_model_status_markdown = gr.Markdown( '<span style="color:green">&nbsp;&nbsp;&nbsp;&nbsp;This model has already been downloaded to local.</span>') else: download_hub_embedding_model_status_markdown = gr.Markdown( '<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;This model has not been downloaded.</span>') with gr.Row(): search_top_k_slider = gr.Slider(1, 10, value=3, step=1, label="Search Top K", interactive=True) search_score_threshold_slider = gr.Slider(0, 1, value=0.5, step=0.1, label="Search Score Threshold",interactive=True) with gr.Group(): gr.Markdown("### &nbsp;&nbsp;3.Chat Model", elem_classes="white_background") local_chat_model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),"models") runs_model_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "runs") # local_chat_model_names = get_hg_model_names_from_dir(local_chat_model_dir) local_chat_model_names = get_hg_model_names_and_gguf_from_dir(local_chat_model_dir,runs_model_root_dir) chat_model_source_radio_choices = ["Download From Huggingface Hub", f"From Local Dir(hg format:{local_chat_model_dir})"] chat_model_source_radio = gr.Radio(chat_model_source_radio_choices, label="Chat Model source",show_label=False, value=chat_model_source_radio_choices[0], interactive=True) with gr.Row(): hub_chat_model_names_dropdown = gr.Dropdown(base_model_names, label=f"Chat Model",show_label=False,allow_custom_value=True, value=base_model_names[0] if base_model_names else None, interactive=True, scale=4, min_width=1) download_hub_chat_model_names_btn = gr.Button("Download", scale=1) stop_download_hub_chat_model_names_btn = gr.Button("Stop", scale=1, 
visible=False) local_chat_model_names_dropdown = gr.Dropdown(local_chat_model_names, label=f"Chat Model",show_label=False, value=local_chat_model_names[0] if local_chat_model_names else None, interactive=True, scale=4, min_width=1,visible=False) refresh_local_chat_model_names_btn = gr.Button("Refresh", scale=1,visible=False) rag_using_4bit_quantization_checkbox = gr.Checkbox(True, label="Using 4-bit quantization", interactive=True, visible=True, info="Less memory but slower", scale=1 ) if validate_model_path(base_model_names[0])[0]: download_hub_chat_model_status_markdown = gr.Markdown( '<span style="color:green">&nbsp;&nbsp;&nbsp;&nbsp;This model has already been downloaded to local.</span>') else: download_hub_chat_model_status_markdown = gr.Markdown( '<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;This model has not been downloaded.</span>') with gr.Tab("Setting"): # with gr.Column(scale=4, min_width=1): with gr.Group(): gr.Markdown("## &nbsp;Setting", elem_classes="white_background") with gr.Group(): with gr.Row(): max_new_tokens_slider = gr.Slider(1, 4096, value=256, step=0.1, label="Max New Tokens", interactive=True) temperature_slider = gr.Slider(0, 5, value=1, step=0.1, label="Temperature", interactive=True) with gr.Row(): top_k_slider = gr.Slider(1, 100, value=50, step=1, label="Top_k", interactive=True) top_p_slider = gr.Slider(0, 1, value=1, step=0.1, label="Top_p", interactive=True) with gr.Row(): repeat_penalty_slider = gr.Slider(1, 5, value=1, step=0.1, label="Repeat Penalty", interactive=True) with gr.Row(): chat_history_window_slider = gr.Slider(1, 20, value=3, step=1, label="Chat History Window", interactive=True) low_cpu_mem_usage_checkbox = gr.Checkbox(False, label="Low Cpu Mem Usage",interactive=True,visible=False) Huggingface_hub_token = gr.Textbox(label="Huggingface Hub Token", value="") def check_local_model_or_dataset_is_empty1(base_model_name_dropdown,Huggingface_hub_token): if len(base_model_name_dropdown.strip()) == 0: raise gr.Error("Name is empty!") try: login_huggingface(Huggingface_hub_token,base_model_name_dropdown) except Exception as e: raise gr.Error(e) def check_local_model_or_dataset_is_empty2(base_model_name_dropdown,Huggingface_hub_token): if len(base_model_name_dropdown.strip()) == 0: raise gr.Error("Name is empty!") try: login_huggingface(Huggingface_hub_token,base_model_name_dropdown) except Exception as e: raise gr.Error(e) def check_local_model_or_dataset_is_empty3(base_model_name_dropdown,Huggingface_hub_token): if len(base_model_name_dropdown.strip()) == 0: raise gr.Error("Name is empty!") try: login_huggingface(Huggingface_hub_token,base_model_name_dropdown) except Exception as e: raise gr.Error(e) def check_local_model_or_dataset_is_empty4(base_model_name_dropdown,Huggingface_hub_token): if len(base_model_name_dropdown.strip()) == 0: raise gr.Error("Name is empty!") try: login_huggingface(Huggingface_hub_token,base_model_name_dropdown) except Exception as e: raise gr.Error(e) def check_local_model_or_dataset_is_empty5(base_model_name_dropdown,Huggingface_hub_token): if len(base_model_name_dropdown.strip()) == 0: raise gr.Error("Name is empty!") try: login_huggingface(Huggingface_hub_token,base_model_name_dropdown) except Exception as e: raise gr.Error(e) def download_hub_home_chat_model_postprocess(): return gr.update(visible=True), gr.update(visible=False) def click_download_hub_home_chat_model_btn(): return gr.update(visible=False), gr.update(visible=True), gr.update(visible=True) def click_stop_download_hub_home_chat_model_names_btn(): 
return gr.update(visible=True), gr.update(visible=False), gr.update(visible=False) def click_stop_download_hub_home_chat_model_names_btn(): return gr.update(visible=True), gr.update(visible=False), gr.update(visible=False) def change_home_chat_model_source_radio(home_chat_model_source_radio, hub_home_chat_model_names_dropdown): local_home_chat_model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models") if home_chat_model_source_radio == "Download From Huggingface Hub": if not hub_home_chat_model_names_dropdown: model_download_status = '<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;No model is selected.</span>' else: if validate_model_path(hub_home_chat_model_names_dropdown)[0]: model_download_status = '<span style="color:green">&nbsp;&nbsp;&nbsp;&nbsp;This model has already been downloaded to local,click load model to run.</span>' else: model_download_status = '<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;This model has not been downloaded.</span>' return gr.update(visible=True), gr.update(visible=False), gr.update( visible=False), gr.update(visible=True, value=model_download_status), gr.update( visible=True), gr.update( visible=False) else: model_download_status = "" return gr.update(visible=False), gr.update(visible=True), gr.update( visible=True), gr.update(visible=False, value=model_download_status), gr.update( visible=False), gr.update( visible=False) click_download_hub_home_chat_model_names_btn_event = download_hub_home_chat_model_names_btn.click( check_local_model_or_dataset_is_empty1, [hub_home_chat_model_names_dropdown,Huggingface_hub_token]).success( click_download_hub_home_chat_model_btn, [], [download_hub_home_chat_model_names_btn, stop_download_hub_home_chat_model_names_btn, download_hub_home_chat_model_status_markdown]).then( download_model_wrapper, [hub_home_chat_model_names_dropdown, local_home_chat_model_root_dir_textbox], download_hub_home_chat_model_status_markdown). 
\ then(download_hub_home_chat_model_postprocess, [], [download_hub_home_chat_model_names_btn, stop_download_hub_home_chat_model_names_btn]) stop_download_hub_home_chat_model_names_btn.click(click_stop_download_hub_home_chat_model_names_btn, [], [download_hub_home_chat_model_names_btn, stop_download_hub_home_chat_model_names_btn, download_hub_home_chat_model_status_markdown], cancels=[ click_download_hub_home_chat_model_names_btn_event]) home_chat_model_source_radio.change(change_home_chat_model_source_radio, [home_chat_model_source_radio, hub_home_chat_model_names_dropdown], [hub_home_chat_model_names_dropdown, local_home_chat_model_names_dropdown, refresh_local_home_chat_model_names_btn, download_hub_home_chat_model_status_markdown, download_hub_home_chat_model_names_btn, stop_download_hub_home_chat_model_names_btn], cancels=[click_download_hub_home_chat_model_names_btn_event]) def change_refresh_local_home_chat_model_names_btn(): local_home_chat_model_names = get_hg_model_names_and_gguf_from_dir(local_home_chat_model_dir,runs_model_root_dir) return gr.update(choices=local_home_chat_model_names,value = local_home_chat_model_names[0] if local_home_chat_model_names else None) refresh_local_home_chat_model_names_btn.click(change_refresh_local_home_chat_model_names_btn,[],[local_home_chat_model_names_dropdown]) def change_hub_home_chat_model_names_dropdown(hub_home_chat_model_names_dropdown): if not hub_home_chat_model_names_dropdown: return gr.update(visible=True, value='<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;No model is selected.</span>'), \ gr.update(visible=True), gr.update(visible=False) if validate_model_path(hub_home_chat_model_names_dropdown)[0]: return gr.update( visible=True, value='<span style="color:green">&nbsp;&nbsp;&nbsp;&nbsp;This model has already been downloaded to local,click load model to run.</span>'), \ gr.update(visible=True), gr.update(visible=False) else: return gr.update(visible=True, value='<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;This model has not been downloaded.</span>'), \ gr.update(visible=True), gr.update(visible=False) hub_home_chat_model_names_dropdown.change(change_hub_home_chat_model_names_dropdown, hub_home_chat_model_names_dropdown, [download_hub_home_chat_model_status_markdown, download_hub_home_chat_model_names_btn, stop_download_hub_home_chat_model_names_btn], cancels=[click_download_hub_home_chat_model_names_btn_event]) def click_load_home_chat_model_btn(home_chat_model_source_radio, hub_home_chat_model_names_dropdown, local_home_chat_model_names_dropdown, max_new_tokens_slider, temperature_slider, top_k_slider, top_p_slider, repeat_penalty_slider, chat_history_window_slider,using_4bit_quantization_checkbox,low_cpu_mem_usage_checkbox, progress=gr.Progress()): if home_chat_model_source_radio == "Download From Huggingface Hub": cur_model_name = hub_home_chat_model_names_dropdown else: cur_model_name = local_home_chat_model_names_dropdown if not validate_model_path(cur_model_name)[0]: raise gr.Error(f"Model does not exist!") global infer_model global stop_generation_status stop_generation_status = True progress(0.6) if infer_model: infer_model.free_memory() infer_model = None torch.cuda.empty_cache() yield "Loading model ..." load_model_status = 0 model_path = validate_model_path(cur_model_name)[1] if model_path.split('.')[-1] == "gguf":
infer_model = LlamaCppInference(model_path=model_path, max_new_tokens=max_new_tokens_slider,
2
2023-11-25 12:37:21+00:00
16k
danilonumeroso/conar
models/tsp_reasoner.py
[ { "identifier": "vmapped_beam_search_rollout", "path": "baselines/beam_search.py", "snippet": "BEAM_WIDTH = 128\ndef expand_single(beam_vis, beam_last, beam_cost, beam_par, W):\ndef beam_search_rollout_step(W, beam_width, i, tpl):\ndef beam_search_rollout(start_route, W, num_nodes, beam_width):\ndef bea...
from collections import defaultdict from pprint import pprint from torch_geometric.loader import DataLoader from pytorch_lightning.trainer.supporters import CombinedLoader from baselines.beam_search import vmapped_beam_search_rollout, BEAM_WIDTH from models.algorithm_reasoner import AlgorithmReasoner, LitAlgorithmReasoner from hyperparameters import get_hyperparameters from torch_geometric.utils import k_hop_subgraph from datasets._configs import CONFIGS from utils_execution import cross_entropy, check_edge_index_sorted, prepare_constants, edge_one_hot_encode_pointers, get_number_of_nodes from clrs import Type, Location, Stage import copy import itertools import time import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torch_scatter import torch_geometric import pytorch_lightning as pl
13,470
class TSPReasoner(AlgorithmReasoner): def __init__(self, spec, data, latent_features, algo_processor, bias=True, use_TF=False, L1_loss=False, global_termination_pool='max', #'predinet', get_attention=False, use_batch_norm=False, transferring=False, timeit=True, double_process=False, **algo_reasoner_kwargs): super().__init__( spec, data, latent_features, algo_processor, use_TF=use_TF, timeit=timeit, L1_loss=L1_loss, global_termination_pool=global_termination_pool, get_attention=get_attention, use_batch_norm=use_batch_norm, transferring=transferring, **algo_reasoner_kwargs, ) self.step_idx = 0 self.assert_checks = False self.debug = False self.debug_epoch_threshold = 1e9 self.next_step_pool = True self.double_process = double_process self.lambda_mul = 1# 0.0001 self.transferring = transferring def get_input_output_hints(self, batch): hint_inp_curr = dict() hint_out_curr = dict() return hint_inp_curr, hint_out_curr def process( self, *args, **kwargs): self.all_hint_logits, self.last_logits, self.all_masks_graph = super().process( *args, first_n_processors=1000 if not self.double_process else 1, **kwargs) if self.double_process: self.all_hint_logits, self.last_logits, self.all_masks_graph = super().process( *args, init_last_latent=self.last_latent, **kwargs) return self.all_hint_logits, self.last_logits, self.all_masks_graph class LitTSPReasoner(LitAlgorithmReasoner): def __init__(self, hidden_dim, algo_processor, dataset_class, dataset_root, dataset_kwargs, bias=True, use_TF=False, ensure_permutation='greedy', transferring=False,
class TSPReasoner(AlgorithmReasoner): def __init__(self, spec, data, latent_features, algo_processor, bias=True, use_TF=False, L1_loss=False, global_termination_pool='max', #'predinet', get_attention=False, use_batch_norm=False, transferring=False, timeit=True, double_process=False, **algo_reasoner_kwargs): super().__init__( spec, data, latent_features, algo_processor, use_TF=use_TF, timeit=timeit, L1_loss=L1_loss, global_termination_pool=global_termination_pool, get_attention=get_attention, use_batch_norm=use_batch_norm, transferring=transferring, **algo_reasoner_kwargs, ) self.step_idx = 0 self.assert_checks = False self.debug = False self.debug_epoch_threshold = 1e9 self.next_step_pool = True self.double_process = double_process self.lambda_mul = 1# 0.0001 self.transferring = transferring def get_input_output_hints(self, batch): hint_inp_curr = dict() hint_out_curr = dict() return hint_inp_curr, hint_out_curr def process( self, *args, **kwargs): self.all_hint_logits, self.last_logits, self.all_masks_graph = super().process( *args, first_n_processors=1000 if not self.double_process else 1, **kwargs) if self.double_process: self.all_hint_logits, self.last_logits, self.all_masks_graph = super().process( *args, init_last_latent=self.last_latent, **kwargs) return self.all_hint_logits, self.last_logits, self.all_masks_graph class LitTSPReasoner(LitAlgorithmReasoner): def __init__(self, hidden_dim, algo_processor, dataset_class, dataset_root, dataset_kwargs, bias=True, use_TF=False, ensure_permutation='greedy', transferring=False,
learning_rate=get_hyperparameters()['lr'],
3
2023-11-20 15:32:43+00:00
16k
harisankar95/pathfinding3D
test/test_path.py
[ { "identifier": "DiagonalMovement", "path": "pathfinding3d/core/diagonal_movement.py", "snippet": "class DiagonalMovement:\n always = 1\n never = 2\n if_at_most_one_obstacle = 3\n only_when_no_obstacle = 4" }, { "identifier": "Grid", "path": "pathfinding3d/core/grid.py", "sni...
import numpy as np import pytest from pathfinding3d.core.diagonal_movement import DiagonalMovement from pathfinding3d.core.grid import Grid from pathfinding3d.core.node import GridNode from pathfinding3d.finder.a_star import AStarFinder from pathfinding3d.finder.best_first import BestFirst from pathfinding3d.finder.bi_a_star import BiAStarFinder from pathfinding3d.finder.breadth_first import BreadthFirstFinder from pathfinding3d.finder.dijkstra import DijkstraFinder from pathfinding3d.finder.finder import ExecutionRunsException, ExecutionTimeException from pathfinding3d.finder.ida_star import IDAStarFinder from pathfinding3d.finder.msp import MinimumSpanningTree
11,215
finders = [ AStarFinder, BestFirst, BiAStarFinder, DijkstraFinder, IDAStarFinder, BreadthFirstFinder, MinimumSpanningTree, ] TIME_LIMIT = 10 # give it a 10 second limit. weighted_finders = [ AStarFinder, BiAStarFinder, DijkstraFinder, MinimumSpanningTree, ] SIMPLE_MATRIX = np.zeros((5, 5, 5)) SIMPLE_MATRIX[0, 0, 0] = 1 SIMPLE_MATRIX[0, 0, 1] = 1 SIMPLE_MATRIX[0, 0, 2] = 1 SIMPLE_MATRIX[0, 0, 3] = 1 SIMPLE_MATRIX[0, 0, 4] = 1 SIMPLE_MATRIX[1, :, :] = 1 SIMPLE_MATRIX[2, :, :] = 1 SIMPLE_MATRIX[3, :, :] = 1 SIMPLE_MATRIX[4, 0, 0] = 1 SIMPLE_MATRIX[4, 1, 0] = 1 SIMPLE_MATRIX[4, 2, 0] = 1 SIMPLE_MATRIX[4, 3, 0] = 1 SIMPLE_MATRIX[4, 4, 0] = 1 WEIGHTED_SIMPLE_MATRIX = np.copy(SIMPLE_MATRIX) WEIGHTED_SIMPLE_MATRIX[4, 1, 1] = 1 WEIGHTED_SIMPLE_MATRIX[4, 2, 1] = 1 WEIGHTED_SIMPLE_MATRIX[4, 3, 1] = 1 WEIGHTED_SIMPLE_MATRIX[4, 2, 0] = 99 WEIGHTED_SIMPLE_MATRIX[1, :, :] = 99 WEIGHTED_SIMPLE_MATRIX[2, :, :] = 99 WEIGHTED_SIMPLE_MATRIX[3, :, :] = 99 def test_path(): """ test if we can find a path """ grid = Grid(matrix=SIMPLE_MATRIX) start = grid.node(0, 0, 0) end = grid.node(4, 4, 0) for find in finders: grid.cleanup() finder = find(time_limit=TIME_LIMIT) path_, runs = finder.find_path(start, end, grid) path = [] for node in path_: if isinstance(node, GridNode): path.append((node.x, node.y, node.z)) elif isinstance(node, tuple): path.append((node[0], node[1], node[2])) print(find.__name__) print(f"path: {path}") print(f"length: {len(path)}, runs: {runs}") assert len(path) == 9 def test_weighted_path(): grid = Grid(matrix=WEIGHTED_SIMPLE_MATRIX) start = grid.node(0, 0, 0) end = grid.node(4, 4, 0) for find in weighted_finders: grid.cleanup() finder = find(time_limit=TIME_LIMIT) path_, runs = finder.find_path(start, end, grid) path = [] for node in path_: if isinstance(node, GridNode): path.append((node.x, node.y, node.z)) elif isinstance(node, tuple): path.append((node[0], node[1], node[2])) print(find.__name__) print(f"path: {path}") print(f"length: {len(path)}, runs: {runs}") assert len(path) == 11 def test_path_diagonal(): # test diagonal movement grid = Grid(matrix=SIMPLE_MATRIX) start = grid.node(0, 0, 0) end = grid.node(4, 4, 0) for find in finders: grid.cleanup() finder = find(diagonal_movement=DiagonalMovement.always, time_limit=TIME_LIMIT) path_, runs = finder.find_path(start, end, grid) path = [] for node in path_: if isinstance(node, GridNode): path.append((node.x, node.y, node.z)) elif isinstance(node, tuple): path.append((node[0], node[1], node[2])) print(find.__name__) print(f"path: {path}") print(f"length: {len(path)}, runs: {runs}") assert len(path) == 5 def test_max_runs(): grid = Grid(matrix=SIMPLE_MATRIX) start = grid.node(0, 0, 0) end = grid.node(4, 4, 0) for find in finders: grid.cleanup() finder = find(diagonal_movement=DiagonalMovement.always, time_limit=TIME_LIMIT, max_runs=3)
finders = [ AStarFinder, BestFirst, BiAStarFinder, DijkstraFinder, IDAStarFinder, BreadthFirstFinder, MinimumSpanningTree, ] TIME_LIMIT = 10 # give it a 10 second limit. weighted_finders = [ AStarFinder, BiAStarFinder, DijkstraFinder, MinimumSpanningTree, ] SIMPLE_MATRIX = np.zeros((5, 5, 5)) SIMPLE_MATRIX[0, 0, 0] = 1 SIMPLE_MATRIX[0, 0, 1] = 1 SIMPLE_MATRIX[0, 0, 2] = 1 SIMPLE_MATRIX[0, 0, 3] = 1 SIMPLE_MATRIX[0, 0, 4] = 1 SIMPLE_MATRIX[1, :, :] = 1 SIMPLE_MATRIX[2, :, :] = 1 SIMPLE_MATRIX[3, :, :] = 1 SIMPLE_MATRIX[4, 0, 0] = 1 SIMPLE_MATRIX[4, 1, 0] = 1 SIMPLE_MATRIX[4, 2, 0] = 1 SIMPLE_MATRIX[4, 3, 0] = 1 SIMPLE_MATRIX[4, 4, 0] = 1 WEIGHTED_SIMPLE_MATRIX = np.copy(SIMPLE_MATRIX) WEIGHTED_SIMPLE_MATRIX[4, 1, 1] = 1 WEIGHTED_SIMPLE_MATRIX[4, 2, 1] = 1 WEIGHTED_SIMPLE_MATRIX[4, 3, 1] = 1 WEIGHTED_SIMPLE_MATRIX[4, 2, 0] = 99 WEIGHTED_SIMPLE_MATRIX[1, :, :] = 99 WEIGHTED_SIMPLE_MATRIX[2, :, :] = 99 WEIGHTED_SIMPLE_MATRIX[3, :, :] = 99 def test_path(): """ test if we can find a path """ grid = Grid(matrix=SIMPLE_MATRIX) start = grid.node(0, 0, 0) end = grid.node(4, 4, 0) for find in finders: grid.cleanup() finder = find(time_limit=TIME_LIMIT) path_, runs = finder.find_path(start, end, grid) path = [] for node in path_: if isinstance(node, GridNode): path.append((node.x, node.y, node.z)) elif isinstance(node, tuple): path.append((node[0], node[1], node[2])) print(find.__name__) print(f"path: {path}") print(f"length: {len(path)}, runs: {runs}") assert len(path) == 9 def test_weighted_path(): grid = Grid(matrix=WEIGHTED_SIMPLE_MATRIX) start = grid.node(0, 0, 0) end = grid.node(4, 4, 0) for find in weighted_finders: grid.cleanup() finder = find(time_limit=TIME_LIMIT) path_, runs = finder.find_path(start, end, grid) path = [] for node in path_: if isinstance(node, GridNode): path.append((node.x, node.y, node.z)) elif isinstance(node, tuple): path.append((node[0], node[1], node[2])) print(find.__name__) print(f"path: {path}") print(f"length: {len(path)}, runs: {runs}") assert len(path) == 11 def test_path_diagonal(): # test diagonal movement grid = Grid(matrix=SIMPLE_MATRIX) start = grid.node(0, 0, 0) end = grid.node(4, 4, 0) for find in finders: grid.cleanup() finder = find(diagonal_movement=DiagonalMovement.always, time_limit=TIME_LIMIT) path_, runs = finder.find_path(start, end, grid) path = [] for node in path_: if isinstance(node, GridNode): path.append((node.x, node.y, node.z)) elif isinstance(node, tuple): path.append((node[0], node[1], node[2])) print(find.__name__) print(f"path: {path}") print(f"length: {len(path)}, runs: {runs}") assert len(path) == 5 def test_max_runs(): grid = Grid(matrix=SIMPLE_MATRIX) start = grid.node(0, 0, 0) end = grid.node(4, 4, 0) for find in finders: grid.cleanup() finder = find(diagonal_movement=DiagonalMovement.always, time_limit=TIME_LIMIT, max_runs=3)
with pytest.raises(ExecutionRunsException):
8
2023-11-21 10:14:12+00:00
16k
yuukawahiroshi/ddb-tools
extract_wav.py
[ { "identifier": "DDIModel", "path": "utils/ddi_utils.py", "snippet": "class DDIModel:\n def __init__(self, ddi_bytes: bytes) -> None:\n self.ddi_bytes = ddi_bytes\n self.ddi_data = None\n self.phdc_data = {}\n self.tdb_data = {}\n self.sta_data = {}\n self.ar...
import argparse import math import os import re import time import wave from typing import Sequence, TypedDict from utils.ddi_utils import DDIModel, bytes_to_str, stream_reverse_search
11,875
snd_pos_list: list[int] = [] # Read DDI file print("Reading DDI...") with open(ddi_path, "rb") as f: ddi_bytes = f.read() ddi_model = DDIModel(ddi_bytes) ddi_model.read() # Extract snd files from DDB ddb_size = os.path.getsize(ddb_path) with open(ddb_path, "rb") as ddb_f: # Dump articulation art_list: list[tuple[list, dict]] = [] for idx, art_item in ddi_model.art_data.items(): if "artu" in art_item: # Triphoneme for idx, artu_item in art_item["artu"].items(): if "artp" in artu_item: for idx, artp_item in artu_item["artp"].items(): phonemes = [art_item["phoneme"], artu_item["phoneme"]] art_list.append((phonemes, artp_item)) if "artu" in artu_item: for idx, artu2_item in artu_item["artu"].items(): if "artp" in artu2_item: for idx, artp_item in artu2_item["artp"].items(): phonemes = [ art_item["phoneme"], artu_item["phoneme"], artu2_item["phoneme"]] art_list.append((phonemes, artp_item)) for art_item in art_list: phonemes = art_item[0] art_item = art_item[1] _, t = art_item["snd"].split("=") snd_offset, _ = t.split("_") snd_offset = int(snd_offset, 16) pitch = art_item["pitch1"] output_path = os.path.join(dst_path, create_file_name( phonemes, filename_style, snd_offset, pitch, dst_path, "wav")) ddb_f.seek(snd_offset) snd_ident = ddb_f.read(4) if snd_ident != start_encode: print( f'Error: SND header not found for articulation [{" ".join(phonemes)}] {i}') continue # Read snd header snd_length = int.from_bytes(ddb_f.read(4), byteorder='little') snd_frame_rate = int.from_bytes(ddb_f.read(4), byteorder='little') snd_channel = int.from_bytes(ddb_f.read(2), byteorder='little') int.from_bytes(ddb_f.read(4), byteorder='little') # unknown snd_bytes = ddb_f.read(snd_length - 18) wav_params = (snd_channel, 2, snd_frame_rate, 0, 'NONE', 'NONE') # Write snd to wave file with wave.open(output_path, "wb") as wav_f: wav_f.setparams(wav_params) wav_f.writeframes(snd_bytes) print("Dumped [%s] -> %s" % (" ".join(phonemes), output_path)) snd_pos_list.append(snd_offset) if (gen_lab or gen_seg) and art_item.get("frame_align"): _, t = art_item["snd_start"].split("=") snd_vstart_offset, _ = t.split("_") snd_vstart_offset = int(snd_vstart_offset, 16) snd_empt_bytes = snd_vstart_offset - snd_offset if gen_lab: lab_content = generate_lab( phonemes, art_item["frame_align"], snd_frame_rate, snd_empt_bytes, snd_length) lab_output_path = os.path.join(dst_path, create_file_name( phonemes, filename_style, snd_offset, pitch, dst_path, "lab")) with open(lab_output_path, "w") as lab_f: lab_f.write(lab_content) elif gen_seg: unvoiced_consonant_list = ddi_model.phdc_data["phoneme"]["unvoiced"] trans_content, seg_content, art_seg_content = generate_seg_files( phonemes, art_item["frame_align"], snd_frame_rate, snd_empt_bytes, snd_length, unvoiced_consonant_list ) trans_output_path = os.path.join(dst_path, create_file_name( phonemes, filename_style, snd_offset, pitch, dst_path, "trans")) seg_output_path = os.path.join(dst_path, create_file_name( phonemes, filename_style, snd_offset, pitch, dst_path, "seg")) art_seg_output_path = os.path.join(dst_path, create_file_name( phonemes, filename_style, snd_offset, pitch, dst_path, "as0")) with open(trans_output_path, "w") as fp: fp.write(trans_content) with open(seg_output_path, "w") as fp: fp.write(seg_content) with open(art_seg_output_path, "w") as fp: fp.write(art_seg_content) # Dump stationary for _, sta_info in ddi_model.sta_data.items(): phoneme = sta_info["phoneme"] for sta_idx, sta_item in sta_info["stap"].items(): _, snd_name = sta_item["snd"].split("=") snd_offset, snd_id = snd_name.split("_") snd_offset = int(snd_offset, 16) pitch = sta_item["pitch1"] output_path = os.path.join(dst_path, create_file_name( [phoneme], filename_style, snd_offset, pitch, dst_path, "wav")) # real_snd_offset = 0x3d
#!/usr/bin/env python3 from __future__ import annotations start_encode = 'SND '.encode() wav_params = (1, 2, 44100, 0, 'NONE', 'NONE') window_size = 512 class ArticulationSegmentInfo(TypedDict): phonemes: list[str, str] boundaries: list[list[str, float, float]] def escape_xsampa(xsampa: str) -> str: """Escapes xsampa to file name.""" xsampa = xsampa.replace("Sil", "sil") # Sil is a special case xsampa = ( xsampa.replace("\\", "-") .replace("/", "~") .replace("?", "!") .replace(":", ";") .replace("<", "(") .replace(">", ")") ) return xsampa def unescape_xsampa(xsampa: str) -> str: """Unescapes xsampa from file name.""" xsampa = ( xsampa.replace("-", "\\") .replace("~", "/") .replace("!", "?") .replace(";", ":") .replace("(", "<") .replace(")", ">") ) return xsampa def parse_args(args: Sequence[str] = None): # : list[str] # initialize parser parser = argparse.ArgumentParser() parser.add_argument('--src_path', required=True, help='source ddi file path') parser.add_argument('--dst_path', help='destination extract path, ' 'default to be "./[name]/snd"') parser.add_argument('--gen_lab', action='store_true', help='generate lab file') parser.add_argument('--gen_seg', action='store_true', help='generate trans, seg, as files') parser.add_argument('--filename_style', type=str, choices=['flat', 'devkit'], default=None, help="output filename style, default to be 'devkit', or default to be 'flat' if gen_lab is true.") # parse args args_result = parser.parse_args(args) ddi_path: str = os.path.normpath(args_result.src_path) ddb_path: str = re.sub(r'\.ddi$', '.ddb', ddi_path) dst_path: str = args_result.dst_path if dst_path is None: dst_path = os.path.dirname(ddi_path) + '/snd' dst_path: str = os.path.normpath(dst_path) # make dirs if not os.path.exists(dst_path): os.makedirs(dst_path) gen_lab: bool = args_result.gen_lab gen_seg: bool = args_result.gen_seg filename_style: str = args_result.filename_style if filename_style is None: if gen_lab or gen_seg: filename_style = "flat" else: filename_style = "devkit" return ddi_path, ddb_path, dst_path, filename_style, gen_lab, gen_seg def create_file_name(phonemes: list[str], name_style: str, offset: int, pitch: float, dst_path: str, file_type: str): offset_hex = f'{offset:0>8x}' escaped_phonemes = [escape_xsampa(p) for p in phonemes] phonemes_len = len(phonemes) if pitch >= 0: pit_str = f"pit+{pitch:.2f}" else: pit_str = f"pit{pitch:.2f}" filename = "" if name_style == "flat": phonemes_str = "-".join(escaped_phonemes) prefix = "" if phonemes_len == 0: filename = f"unknown_{offset_hex}.{file_type}" else: if phonemes_len == 1: if phonemes[0] == "growl": prefix = "growl" else: prefix = "sta" elif phonemes_len == 2: prefix = "art" elif phonemes_len == 3: prefix = "tri" file_type_prefix = "lab" if file_type == "lab" else "wav" filename = f"{file_type_prefix}/{prefix}_[{phonemes_str}]_{pit_str}_{offset_hex}.{file_type}" elif name_style == "devkit": phonemes_path = "/".join([item + "#" + bytes_to_str(item.encode('utf-8')) for item in escaped_phonemes]) root_path = "" if phonemes_len == 0: filename = f"unknown/{offset_hex}.{file_type}" else: if phonemes_len == 1: if phonemes[0] == "growl": root_path = "vqm/growl" else: root_path = "stationary" elif phonemes_len == 2: root_path = "articulation" elif phonemes_len == 3: root_path = "triphoneme" filename = f"{root_path}/{phonemes_path}/{pit_str}_{offset_hex}.{file_type}" folder = os.path.dirname(filename) if folder != "": os.makedirs(os.path.join(dst_path, folder), exist_ok=True) return filename def nsample2sec(nsample: int, sample_rate: int) -> float: return nsample / sample_rate / 2 def frm2sec(frm: int, sample_rate: int) -> float: return frm * window_size / sample_rate / 2 def generate_lab(phonemes: list[str], frame_align: list[dict], sample_rate: int, offset_bytes: int, total_bytes: int): offset_time = nsample2sec(offset_bytes, sample_rate) * 1e7 duration_time = nsample2sec(total_bytes, sample_rate) * 1e7 lab_lines = [] if len(phonemes) == 3: # VCV center_phoneme = re.sub("^\^", "", phonemes[1]) phonemes = [phonemes[0], center_phoneme, center_phoneme, phonemes[2]] lab_lines.append(f"0 {offset_time:.0f} sil") last_time = 0 for i, phoneme in enumerate(phonemes): frame = frame_align[i] start_time = offset_time + frm2sec(frame["start"], sample_rate) * 1e7 end_time = offset_time + frm2sec(frame["end"], sample_rate) * 1e7 lab_lines.append(f'{start_time:.0f} {end_time:.0f} {phoneme}') last_time = end_time lab_lines.append(f'{last_time:.0f} {duration_time:.0f} sil') return "\n".join(lab_lines) def generate_seg_files( phonemes: list[str], frame_align: list[dict], sample_rate: int, offset_bytes: int, total_bytes: int, unvoiced_consonant_list: list[str]): offset_time = nsample2sec(offset_bytes, sample_rate) duration_time = nsample2sec(total_bytes, sample_rate) if len(phonemes) == 3: # VCV center_phoneme = re.sub("^\^", "", phonemes[1]) phonemes = [phonemes[0], center_phoneme, center_phoneme, phonemes[2]] seg_list: list[list] = [] boundaries: list[float] = [] for i, phoneme in enumerate(phonemes): start_time = offset_time + \ frm2sec(frame_align[i]["start"], sample_rate) end_time = offset_time + frm2sec(frame_align[i]["end"], sample_rate) if i == 0: boundaries.append(start_time) boundaries.append(end_time) seg_list.append([phoneme, start_time, end_time]) art_seg_info: ArticulationSegmentInfo = { "boundaries": boundaries, "phonemes": [] } if len(phonemes) == 4: # VCV art_seg_info["phonemes"] = [phonemes[0], phonemes[1], phonemes[3]] else: art_seg_info["phonemes"] = phonemes trans_content = generate_transcription(seg_list) seg_content = generate_seg(seg_list, duration_time) art_seg_content = generate_articulation_seg( art_seg_info, total_bytes, unvoiced_consonant_list) return trans_content, seg_content, art_seg_content def generate_transcription(seg_info: list[list]) -> str: content = [] phoneme_list = [] for i in range(0, len(seg_info)): phoneme_list.append(seg_info[i][0]) content.append(" ".join(phoneme_list)) trans_group = [item[0] for item in seg_info] content.append("[" + " ".join(trans_group) + "]") return "\n".join(content) def generate_seg( phoneme_list: list[list], wav_length: float ) -> str: content = [ "nPhonemes %d" % (len(phoneme_list) + 2,), # Add 2 Sil "articulationsAreStationaries = 0", "phoneme BeginTime EndTime", "===================================================", ] content.append("%s\t\t%.6f\t\t%.6f" % ("Sil", 0, phoneme_list[0][1])) begin_time: float = 0 end_time: float = 0 for i in range(0, len(phoneme_list)): phoneme_info = phoneme_list[i] phoneme_name = phoneme_info[0] begin_time = phoneme_info[1] end_time = phoneme_info[2] content.append("%s\t\t%.6f\t\t%.6f" % (phoneme_name, begin_time, end_time)) content.append("%s\t\t%.6f\t\t%.6f" % ("Sil", end_time, wav_length)) return "\n".join(content) + "\n" def generate_articulation_seg( art_seg_info: ArticulationSegmentInfo, wav_samples: int, unvoiced_consonant_list: list[str] ) -> str: content = [ "nphone art segmentation", "{", '\tphns: ["' + ('", "'.join(art_seg_info["phonemes"])) + '"];', "\tcut offset: 0;", "\tcut length: %d;" % int(math.floor(wav_samples / 2)), ] boundaries_str = [ ("%.9f" % item) for item in art_seg_info["boundaries"] ] content.append("\tboundaries: [" + ", ".join(boundaries_str) + "];") content.append("\trevised: false;") voiced_str = [] is_triphoneme = len(art_seg_info["phonemes"]) == 3 for i in range(0, len(art_seg_info["phonemes"])): phoneme = art_seg_info["phonemes"][i] is_unvoiced = phoneme in unvoiced_consonant_list or phoneme in [ "Sil", "Asp", "?", ] voiced_str.append(str(not is_unvoiced).lower()) if is_triphoneme and i == 1: # Triphoneme needs 2 flags for center phoneme voiced_str.append(str(not is_unvoiced).lower()) content.append("\tvoiced: [" + ", ".join(voiced_str) + "];") content.append("};") content.append("") return "\n".join(content) def main(): ddi_path, ddb_path, dst_path, filename_style, gen_lab, gen_seg = parse_args() snd_pos_list: list[int] = [] # Read DDI file print("Reading DDI...") with open(ddi_path, "rb") as f: ddi_bytes = f.read() ddi_model = DDIModel(ddi_bytes) ddi_model.read() # Extract snd files from DDB ddb_size = os.path.getsize(ddb_path) with open(ddb_path, "rb") as ddb_f: # Dump articulation art_list: list[tuple[list, dict]] = [] for idx, art_item in ddi_model.art_data.items(): if "artu" in art_item: # Triphoneme for idx, artu_item in art_item["artu"].items(): if "artp" in artu_item: for idx, artp_item in artu_item["artp"].items(): phonemes = [art_item["phoneme"], artu_item["phoneme"]] art_list.append((phonemes, artp_item)) if "artu" in artu_item: for idx, artu2_item in artu_item["artu"].items(): if "artp" in artu2_item: for idx, artp_item in artu2_item["artp"].items(): phonemes = [ art_item["phoneme"], artu_item["phoneme"], artu2_item["phoneme"]] art_list.append((phonemes, artp_item)) for art_item in art_list: phonemes = art_item[0] art_item = art_item[1] _, t = art_item["snd"].split("=") snd_offset, _ = t.split("_") snd_offset = int(snd_offset, 16) pitch = art_item["pitch1"] output_path = os.path.join(dst_path, create_file_name( phonemes, filename_style, snd_offset, pitch, dst_path, "wav")) ddb_f.seek(snd_offset) snd_ident = ddb_f.read(4) if snd_ident != start_encode: print( f'Error: SND header not found for articulation [{" ".join(phonemes)}] {i}') continue # Read snd header snd_length = int.from_bytes(ddb_f.read(4), byteorder='little') snd_frame_rate = int.from_bytes(ddb_f.read(4), byteorder='little') snd_channel = int.from_bytes(ddb_f.read(2), byteorder='little') int.from_bytes(ddb_f.read(4), byteorder='little') # unknown snd_bytes = ddb_f.read(snd_length - 18) wav_params = (snd_channel, 2, snd_frame_rate, 0, 'NONE', 'NONE') # Write snd to wave file with wave.open(output_path, "wb") as wav_f: wav_f.setparams(wav_params) wav_f.writeframes(snd_bytes) print("Dumped [%s] -> %s" % (" ".join(phonemes), output_path)) snd_pos_list.append(snd_offset) if (gen_lab or gen_seg) and art_item.get("frame_align"): _, t = art_item["snd_start"].split("=") snd_vstart_offset, _ = t.split("_") snd_vstart_offset = int(snd_vstart_offset, 16) snd_empt_bytes = snd_vstart_offset - snd_offset if gen_lab: lab_content = generate_lab( phonemes, art_item["frame_align"], snd_frame_rate, snd_empt_bytes, snd_length) lab_output_path = os.path.join(dst_path, create_file_name( phonemes, filename_style, snd_offset, pitch, dst_path, "lab")) with open(lab_output_path, "w") as lab_f: lab_f.write(lab_content) elif gen_seg: unvoiced_consonant_list = ddi_model.phdc_data["phoneme"]["unvoiced"] trans_content, seg_content, art_seg_content = generate_seg_files( phonemes, art_item["frame_align"], snd_frame_rate, snd_empt_bytes, snd_length, unvoiced_consonant_list ) trans_output_path = os.path.join(dst_path, create_file_name( phonemes, filename_style, snd_offset, pitch, dst_path, "trans")) seg_output_path = os.path.join(dst_path, create_file_name( phonemes, filename_style, snd_offset, pitch, dst_path, "seg")) art_seg_output_path = os.path.join(dst_path, create_file_name( phonemes, filename_style, snd_offset, pitch, dst_path, "as0")) with open(trans_output_path, "w") as fp: fp.write(trans_content) with open(seg_output_path, "w") as fp: fp.write(seg_content) with open(art_seg_output_path, "w") as fp: fp.write(art_seg_content) # Dump stationary for _, sta_info in ddi_model.sta_data.items(): phoneme = sta_info["phoneme"] for sta_idx, sta_item in sta_info["stap"].items(): _, snd_name = sta_item["snd"].split("=") snd_offset, snd_id = snd_name.split("_") snd_offset = int(snd_offset, 16) pitch = sta_item["pitch1"] output_path = os.path.join(dst_path, create_file_name( [phoneme], filename_style, snd_offset, pitch, dst_path, "wav")) # real_snd_offset = 0x3d
real_snd_offset = stream_reverse_search(
2
2023-11-20 11:37:46+00:00
16k
shercoo/RGDiffSR
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n ...
import datetime import math import cv2 import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import pygame from collections import OrderedDict from matplotlib import pyplot as plt from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager from functools import partial from torchvision import transforms from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.distributed import rank_zero_only from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ldm.modules.ema import LitEma from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like from ldm.models.diffusion.ddim import DDIMSampler from text_super_resolution.model.VisionLAN.utils import Attention_AR_counter from text_super_resolution.model.tps_spatial_transformer import TPSSpatialTransformer from text_super_resolution.model.stn_head import STNHead from text_super_resolution.model.VisionLAN.VisionLAN import VisionLAN from utils.render_standard_text import * from text_super_resolution.loss.semantic_loss import SemanticLoss from text_super_resolution.utils import ssim_psnr from pygame import freetype from utils.metrics import *
14,272
log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, text_prior_enable=False, image_height=32, image_width=128, STN_enable=False, standard_text=False, VL_pretrained_path=None, fid_eval=False, visualize=False, down_sample_rate=2, recog_loss_enable=False, font_path=None, *args, **kwargs): self.fid_eval = fid_eval self.visualize = visualize self.text_prior_enable = text_prior_enable self.recog_loss_enable = recog_loss_enable self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__': conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True self.image_height = image_height self.image_width = image_width self.stn = STN_enable if self.stn: self.tps_inputsize = [image_height // down_sample_rate, image_width // down_sample_rate] tps_outputsize = [image_height // down_sample_rate, image_width // down_sample_rate] num_control_points = 20 tps_margins = [0.05, 0.05] self.tps = TPSSpatialTransformer( output_image_size=tuple(tps_outputsize), num_control_points=num_control_points, margins=tuple(tps_margins)) self.stn_head = STNHead( in_planes=3, num_ctrlpoints=num_control_points, activation='none', input_size=self.tps_inputsize) self.standard_text = standard_text if self.standard_text: # self.VL_model = self.VisionLAN_init(VL_pretrained_path) # self.test_acc_counter = Attention_AR_counter('\ntest accuracy: ', # '/home/zhouyuxuan/latent-diffusion/dic_36.txt', False) self.font_path = font_path pygame.init() freetype.init() self.cal_psnr = ssim_psnr.calculate_psnr self.cal_ssim = ssim_psnr.SSIM() def VisionLAN_init(self, path=None): cfg = {'args': { 'strides': [(1, 1), (2, 2), (2, 2), (2, 2), (1, 1), (1, 1)], 'input_shape': [3, 64, 256], # C x H x W }, 'init_state_dict': '/home/zhouyuxuan/latent-diffusion/visionlan.pth', }
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} sem_loss = SemanticLoss() def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., ): super().__init__() assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. 
- betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") print(sd.keys()) print(sd['epoch']) print(sd['global_step']) print(sd['callbacks']) # print(sd['optimizer_states']) # print(sd['lr_schedulers']) # print(sd['state_dict'].keys()) # exit(0) if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): # print('************************fuck',k) x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict 
= self(x) return loss, loss_dict def training_step(self, batch, batch_idx): loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): # print('******************************in validation') _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, text_prior_enable=False, image_height=32, image_width=128, STN_enable=False, standard_text=False, VL_pretrained_path=None, fid_eval=False, visualize=False, down_sample_rate=2, recog_loss_enable=False, font_path=None, *args, **kwargs): self.fid_eval = fid_eval self.visualize = visualize self.text_prior_enable = text_prior_enable self.recog_loss_enable = recog_loss_enable self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config 
== '__is_unconditional__': conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True self.image_height = image_height self.image_width = image_width self.stn = STN_enable if self.stn: self.tps_inputsize = [image_height // down_sample_rate, image_width // down_sample_rate] tps_outputsize = [image_height // down_sample_rate, image_width // down_sample_rate] num_control_points = 20 tps_margins = [0.05, 0.05] self.tps = TPSSpatialTransformer( output_image_size=tuple(tps_outputsize), num_control_points=num_control_points, margins=tuple(tps_margins)) self.stn_head = STNHead( in_planes=3, num_ctrlpoints=num_control_points, activation='none', input_size=self.tps_inputsize) self.standard_text = standard_text if self.standard_text: # self.VL_model = self.VisionLAN_init(VL_pretrained_path) # self.test_acc_counter = Attention_AR_counter('\ntest accuracy: ', # '/home/zhouyuxuan/latent-diffusion/dic_36.txt', False) self.font_path = font_path pygame.init() freetype.init() self.cal_psnr = ssim_psnr.calculate_psnr self.cal_ssim = ssim_psnr.SSIM() def VisionLAN_init(self, path=None): cfg = {'args': { 'strides': [(1, 1), (2, 2), (2, 2), (2, 2), (1, 1), (1, 1)], 'input_shape': [3, 64, 256], # C x H x W }, 'init_state_dict': '/home/zhouyuxuan/latent-diffusion/visionlan.pth', }
model_VL = VisionLAN(**cfg['args'])
21
2023-11-20 06:34:21+00:00
16k
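The diffusion row in log_images above repeatedly calls q_sample to corrupt x_start at increasing timesteps. As a minimal standalone sketch (assuming a standard DDPM linear beta schedule; this is not the record's own implementation), the closed-form forward step x_t = sqrt(a_bar_t) * x_0 + sqrt(1 - a_bar_t) * eps looks like:

import torch

def q_sample(x_start, t, betas, noise=None):
    """Draw x_t ~ q(x_t | x_0) for a batch of integer timesteps t."""
    if noise is None:
        noise = torch.randn_like(x_start)
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)             # a_bar_t
    a_bar = alphas_cumprod[t].view(-1, *([1] * (x_start.dim() - 1)))
    return a_bar.sqrt() * x_start + (1.0 - a_bar).sqrt() * noise

# usage: 1000-step schedule, noise a 3-image batch at t = 0, 500, 999
betas = torch.linspace(1e-4, 2e-2, 1000)
x0 = torch.randn(3, 3, 64, 64)
xt = q_sample(x0, torch.tensor([0, 500, 999]), betas)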
mjavadpur/mj_ONNX_SadTalker
inference_onnx.py
[ { "identifier": "AnimateFromCoeff", "path": "src/facerender/animate_onnx.py", "snippet": "class AnimateFromCoeff():\n\n def __init__(self, sadtalker_path, device):\n\n with open(sadtalker_path['facerender_yaml']) as f:\n config = yaml.safe_load(f)\n\n generator = OcclusionAwa...
from glob import glob from time import strftime from argparse import ArgumentParser from src.facerender.animate_onnx import AnimateFromCoeff from src.generate_batch import get_data from src.generate_facerender_batch import get_facerender_data from src.utils.init_path import init_path from src.utils.preprocess import CropAndExtract from src.test_audio2coeff import Audio2Coeff from src.generate_batch import get_data from src.generate_facerender_batch import get_facerender_data from src.utils.init_path import init_path from src.face3d.visualize import gen_composed_video import shutil import torch import os, sys, time import base64
12,717
# from src.facerender.animate import AnimateFromCoeff

def main(args):
    # torch.backends.cudnn.enabled = False
    # tts_service = os.getenv("TTS_SERVER")
    facerender_batch_size = 10
    startInference = time.time()
    pic_path = args.source_image
    audio_path = args.driven_audio
    save_dir = os.path.join(args.result_dir, strftime("%Y_%m_%d_%H.%M.%S"))
    os.makedirs(save_dir, exist_ok=True)
    pose_style = args.pose_style
    device = args.device
    batch_size = args.batch_size
    input_yaw_list = args.input_yaw
    input_pitch_list = args.input_pitch
    input_roll_list = args.input_roll
    ref_eyeblink = args.ref_eyeblink
    ref_pose = args.ref_pose

    current_root_path = os.path.split(sys.argv[0])[0]

    sadtalker_paths = init_path(args.checkpoint_dir, os.path.join(current_root_path, 'src/config'),
                                args.size, args.old_version, args.preprocess)

    # init model
    preprocess_model = CropAndExtract(sadtalker_paths, device)
audio_to_coeff = Audio2Coeff(sadtalker_paths, device)
5
2023-11-25 06:53:12+00:00
16k
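For context, a hedged sketch of the CLI wiring that main(args) above implies: every flag name mirrors an attribute the function reads (args.source_image, args.driven_audio, args.input_yaw, ...), but the defaults are illustrative guesses rather than the repo's actual values, and the snippet assumes it sits in the same file as main.

from argparse import ArgumentParser

parser = ArgumentParser()
parser.add_argument("--source_image", required=True, help="portrait image to animate")
parser.add_argument("--driven_audio", required=True, help="driving audio clip")
parser.add_argument("--result_dir", default="./results")
parser.add_argument("--checkpoint_dir", default="./checkpoints")
parser.add_argument("--pose_style", type=int, default=0)
parser.add_argument("--batch_size", type=int, default=2)
parser.add_argument("--size", type=int, default=256)
parser.add_argument("--device", default="cuda")
parser.add_argument("--input_yaw", type=int, nargs="+", default=None)
parser.add_argument("--input_pitch", type=int, nargs="+", default=None)
parser.add_argument("--input_roll", type=int, nargs="+", default=None)
parser.add_argument("--ref_eyeblink", default=None)
parser.add_argument("--ref_pose", default=None)
parser.add_argument("--preprocess", default="crop")
parser.add_argument("--old_version", action="store_true")

if __name__ == "__main__":
    main(parser.parse_args())   # main as defined in the record above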
microsoft/Project-BayesDAG
src/causica/preprocessing/data_processor.py
[ { "identifier": "CausalDataset", "path": "src/causica/datasets/dataset.py", "snippet": "class CausalDataset(Dataset):\n \"\"\"\n Class to store the np.ndarray adjacency matrix and samples\n from the intervention distributions as attributes of the Dataset object.\n \"\"\"\n\n def __init__...
import logging import warnings import numpy as np import torch from typing import Iterable, List, Optional, Tuple, TypeVar, Union from scipy import sparse from scipy.sparse import csr_matrix, issparse from sklearn.exceptions import NotFittedError from sklearn.preprocessing import OneHotEncoder, StandardScaler from sklearn.utils.validation import check_is_fitted from tqdm import tqdm from ..datasets.dataset import CausalDataset, Dataset, SparseDataset from ..datasets.intervention_data import InterventionData from ..datasets.variables import Variables from .transforms import IdentityTransform, UnitScaler
12,549
self._txt_unproc_cols, self._txt_proc_cols = [], [] self._num_processed_cols = sum(var.processed_dim for var in self._variables) def process_data_and_masks( self, data: csr_matrix, data_mask: csr_matrix, *extra_masks: csr_matrix, batch_size: int = 1000, ) -> Tuple[csr_matrix, ...]: """ Process and validate data, data mask and optionally any number of additional masks. These masks will all be applied to the data when performing data range validation, in case of e.g. dummy zero data that is masked out by an additional obs_mask. Args: data: Unprocessed data array data_mask: Data indicating which values in `data` are observed. Can be any dtype provided all values are either 0 or 1. extra_masks: Additional masks to be processed, if any. Can be any dtype provided all values are either 0 or 1. batch_size: Batch size used during data preprocessing for sparse matrices. Returns: processed_data: Data with categorical variables expanded to a one-hot encoding, and features normalised. processed_data_mask: Boolean mask with categorical variables expanded to a one-hot encoding. processed_extra_masks: Any additional boolean masks with categorical variables expanded to a one-hot encoding. """ if not issparse(data): ( proc_data, proc_data_mask, *proc_extra_masks, ) = self._process_and_check_dense(data, data_mask, *extra_masks) else: # Break sparse data into smaller batches and preprocess each as a dense array. Somewhat inefficient but # allows us to reuse our preprocessing functions and keeps memory usage manageable. proc_data_list: List[csr_matrix] = [] proc_data_mask_list: List[csr_matrix] = [] proc_extra_masks_lists: Tuple[List[csr_matrix], ...] = tuple([] for mask in extra_masks) num_rows = data.shape[0] for start_idx in tqdm(range(0, num_rows, batch_size), desc="Data preprocessing"): stop_idx = min(start_idx + batch_size, num_rows) data_batch = data[start_idx:stop_idx].toarray() data_mask_batch = data_mask[start_idx:stop_idx].toarray() extra_masks_batch = tuple(mask[start_idx:stop_idx].toarray() for mask in extra_masks) # TODO: we will currently lose sparsity for rescaled continuous data here, since 0 will be mapped to # another value. We could multiply by the mask to zero out unobserved data but we need to make sure this # doesn't have any unintended consequences for cases with more complex masking, e.g. active learning ( proc_data_batch, proc_data_mask_batch, *proc_extra_masks_batch, ) = self._process_and_check_dense(data_batch, data_mask_batch, *extra_masks_batch) proc_data_list.append(csr_matrix(proc_data_batch)) proc_data_mask_list.append(csr_matrix(proc_data_mask_batch)) for mask_list, mask in zip(proc_extra_masks_lists, proc_extra_masks_batch): mask_list.append(csr_matrix(mask)) proc_data = sparse.vstack(proc_data_list, format="csr") proc_data_mask = sparse.vstack(proc_data_mask_list, format="csr") proc_extra_masks = tuple( sparse.vstack(proc_mask_list, format="csr") for proc_mask_list in proc_extra_masks_lists ) return (proc_data, proc_data_mask, *proc_extra_masks) def _process_and_check_dense(self, data: np.ndarray, data_mask: np.ndarray, *extra_masks: np.ndarray): """ Check validity of dense data and masks and process them. 
""" combined_mask = data_mask for mask in extra_masks: combined_mask = combined_mask * mask self.check_data(data, combined_mask) self.check_mask(data_mask) for mask in extra_masks: self.check_mask(mask) proc_data = self.process_data(data) proc_data_mask = self.process_mask(data_mask) proc_extra_masks = tuple(self.process_mask(mask) for mask in extra_masks) return (proc_data, proc_data_mask, *proc_extra_masks) def process_intervention_data( self, intervention_data: Union[InterventionData, Iterable[InterventionData]] ) -> List[InterventionData]: """Preprocesses data in the InterventionData format and returns a list of processed InterventionData objects. Args: intervention_data (Union[InterventionData, Iterable[InterventionData]]): InterventionData object or list of InterventionData objects to be processed. Returns: List[InterventionData]: List of processed InterventionData objects. """ if isinstance(intervention_data, InterventionData): intervention_data = [intervention_data] proc_intervention = [ InterventionData( i.intervention_idxs, self.process_data_subset_by_group(i.intervention_values, i.intervention_idxs), self.process_data(i.test_data), i.conditioning_idxs, self.process_data_subset_by_group(i.conditioning_values, i.conditioning_idxs), i.effect_idxs, self.process_data_subset_by_group(i.intervention_reference, i.intervention_idxs), self.process_data(i.reference_data) if i.reference_data is not None else None, ) for i in intervention_data ] return proc_intervention def process_dataset(
EPSILON = 1e-5 logger = logging.getLogger(__name__) V = TypeVar("V", np.ndarray, torch.Tensor) # pylint: disable=protected-access class DataProcessor: def __init__( self, variables: Variables, unit_scale_continuous: bool = True, standardize_data_mean: bool = False, standardize_data_std: bool = False, ): """ Args: variables (Variables): Information about variables/features used by this model. unit_scale_continuous (bool): Scale continuous variables to the range of [0, 1]. standardize_data_mean (bool): Standardize continuous variables to mean=0 standardize_data_std (bool): Standardize continuous variables to std=1 """ if unit_scale_continuous and (standardize_data_mean or standardize_data_std): raise ValueError("Cannot unit scale and standardize variables simultanously.") self._variables = variables # Call unprocessed columns unproc_cols, processed columns proc_cols unproc_cols_by_type = self._variables.unprocessed_cols_by_type proc_cols_by_type = self._variables.processed_cols_by_type def flatten(lists): # Flatten proc_cols for continuous and binary unproc_cols, since they will be of form [[1], [2], ...] return [i for sublist in lists for i in sublist] if "binary" in unproc_cols_by_type: self._bin_unproc_cols = unproc_cols_by_type["binary"] self._bin_proc_cols = flatten(proc_cols_by_type["binary"]) # Save contiguous regions containig binary features to allow for more efficient processing via slicing self._bin_unproc_regions = self.split_contiguous_sublists(self._bin_unproc_cols) self._bin_proc_regions = self.split_contiguous_sublists(self._bin_proc_cols) assert len(self._bin_unproc_regions) == len(self._bin_proc_regions) else: self._bin_unproc_cols, self._bin_proc_cols = [], [] if "continuous" in unproc_cols_by_type: self._cts_unproc_cols = unproc_cols_by_type["continuous"] self._cts_proc_cols = flatten(proc_cols_by_type["continuous"]) # Save contiguous regions containing continuous features to allow for more efficient processing via slicing if all(x.overwrite_processed_dim is None for x in self._variables): self._cts_unproc_regions = self.split_contiguous_sublists(self._cts_unproc_cols) self._cts_proc_regions = self.split_contiguous_sublists(self._cts_proc_cols) else: # For VAEM, we can only take single variable as region # to allow for processing/reverting mask self._cts_unproc_regions = [[col_id] for col_id in unproc_cols_by_type["continuous"]] self._cts_proc_regions = proc_cols_by_type["continuous"] assert len(self._cts_unproc_regions) == len(self._cts_proc_regions) if unit_scale_continuous: self._cts_normalizers = [ UnitScaler(variables[i] for i in unproc_region) for unproc_region in self._cts_unproc_regions ] elif standardize_data_mean or standardize_data_std: self._cts_normalizers = [ StandardScaler(with_mean=standardize_data_mean, with_std=standardize_data_std) for _ in self._cts_unproc_regions ] else: self._cts_normalizers = [IdentityTransform()] * len(self._cts_unproc_regions) else: self._cts_unproc_cols, self._cts_proc_cols, self._cts_normalizers = [], [], [] if "categorical" in unproc_cols_by_type: self._cat_unproc_cols = unproc_cols_by_type["categorical"] self._cat_proc_cols = flatten(proc_cols_by_type["categorical"]) self._cat_proc_cols_grouped = proc_cols_by_type["categorical"] def get_lower(idx): return self._variables[idx].lower def get_upper(idx): return self._variables[idx].upper var_categories = [ np.arange(int(get_lower(var_idx)), int(get_upper(var_idx)) + 1) for var_idx in self._cat_unproc_cols ] self._one_hot_encoder = OneHotEncoder(categories=var_categories, 
sparse=False, handle_unknown="ignore") # Fit on dummy data due to an issue in sklearn where the encoder needs to be fitted to data even if the # categories are specified upon creation. self._one_hot_encoder.fit(np.array([categories[0] for categories in var_categories]).reshape(1, -1)) else: self._cat_unproc_cols, self._cat_proc_cols = [], [] self._txt_unproc_cols, self._txt_proc_cols = [], [] self._num_processed_cols = sum(var.processed_dim for var in self._variables) def process_data_and_masks( self, data: csr_matrix, data_mask: csr_matrix, *extra_masks: csr_matrix, batch_size: int = 1000, ) -> Tuple[csr_matrix, ...]: """ Process and validate data, data mask and optionally any number of additional masks. These masks will all be applied to the data when performing data range validation, in case of e.g. dummy zero data that is masked out by an additional obs_mask. Args: data: Unprocessed data array data_mask: Data indicating which values in `data` are observed. Can be any dtype provided all values are either 0 or 1. extra_masks: Additional masks to be processed, if any. Can be any dtype provided all values are either 0 or 1. batch_size: Batch size used during data preprocessing for sparse matrices. Returns: processed_data: Data with categorical variables expanded to a one-hot encoding, and features normalised. processed_data_mask: Boolean mask with categorical variables expanded to a one-hot encoding. processed_extra_masks: Any additional boolean masks with categorical variables expanded to a one-hot encoding. """ if not issparse(data): ( proc_data, proc_data_mask, *proc_extra_masks, ) = self._process_and_check_dense(data, data_mask, *extra_masks) else: # Break sparse data into smaller batches and preprocess each as a dense array. Somewhat inefficient but # allows us to reuse our preprocessing functions and keeps memory usage manageable. proc_data_list: List[csr_matrix] = [] proc_data_mask_list: List[csr_matrix] = [] proc_extra_masks_lists: Tuple[List[csr_matrix], ...] = tuple([] for mask in extra_masks) num_rows = data.shape[0] for start_idx in tqdm(range(0, num_rows, batch_size), desc="Data preprocessing"): stop_idx = min(start_idx + batch_size, num_rows) data_batch = data[start_idx:stop_idx].toarray() data_mask_batch = data_mask[start_idx:stop_idx].toarray() extra_masks_batch = tuple(mask[start_idx:stop_idx].toarray() for mask in extra_masks) # TODO: we will currently lose sparsity for rescaled continuous data here, since 0 will be mapped to # another value. We could multiply by the mask to zero out unobserved data but we need to make sure this # doesn't have any unintended consequences for cases with more complex masking, e.g. active learning ( proc_data_batch, proc_data_mask_batch, *proc_extra_masks_batch, ) = self._process_and_check_dense(data_batch, data_mask_batch, *extra_masks_batch) proc_data_list.append(csr_matrix(proc_data_batch)) proc_data_mask_list.append(csr_matrix(proc_data_mask_batch)) for mask_list, mask in zip(proc_extra_masks_lists, proc_extra_masks_batch): mask_list.append(csr_matrix(mask)) proc_data = sparse.vstack(proc_data_list, format="csr") proc_data_mask = sparse.vstack(proc_data_mask_list, format="csr") proc_extra_masks = tuple( sparse.vstack(proc_mask_list, format="csr") for proc_mask_list in proc_extra_masks_lists ) return (proc_data, proc_data_mask, *proc_extra_masks) def _process_and_check_dense(self, data: np.ndarray, data_mask: np.ndarray, *extra_masks: np.ndarray): """ Check validity of dense data and masks and process them. 
""" combined_mask = data_mask for mask in extra_masks: combined_mask = combined_mask * mask self.check_data(data, combined_mask) self.check_mask(data_mask) for mask in extra_masks: self.check_mask(mask) proc_data = self.process_data(data) proc_data_mask = self.process_mask(data_mask) proc_extra_masks = tuple(self.process_mask(mask) for mask in extra_masks) return (proc_data, proc_data_mask, *proc_extra_masks) def process_intervention_data( self, intervention_data: Union[InterventionData, Iterable[InterventionData]] ) -> List[InterventionData]: """Preprocesses data in the InterventionData format and returns a list of processed InterventionData objects. Args: intervention_data (Union[InterventionData, Iterable[InterventionData]]): InterventionData object or list of InterventionData objects to be processed. Returns: List[InterventionData]: List of processed InterventionData objects. """ if isinstance(intervention_data, InterventionData): intervention_data = [intervention_data] proc_intervention = [ InterventionData( i.intervention_idxs, self.process_data_subset_by_group(i.intervention_values, i.intervention_idxs), self.process_data(i.test_data), i.conditioning_idxs, self.process_data_subset_by_group(i.conditioning_values, i.conditioning_idxs), i.effect_idxs, self.process_data_subset_by_group(i.intervention_reference, i.intervention_idxs), self.process_data(i.reference_data) if i.reference_data is not None else None, ) for i in intervention_data ] return proc_intervention def process_dataset(
self, dataset: Union[Dataset, CausalDataset, SparseDataset]
1
2023-11-21 12:55:08+00:00
16k
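The process_data_and_masks method above avoids densifying a large sparse matrix all at once by processing row batches and re-stacking them as CSR. A self-contained sketch of that pattern (the per-batch transform here is a simple min-max rescale standing in for the class's normalizers, which this record does not reproduce):

import numpy as np
from scipy import sparse
from scipy.sparse import csr_matrix

def process_sparse_in_batches(data, lo, hi, batch_size=1000):
    """Densify row batches of a csr_matrix, rescale, and restack as CSR."""
    out = []
    for start in range(0, data.shape[0], batch_size):
        dense = data[start:start + batch_size].toarray()
        out.append(csr_matrix((dense - lo) / (hi - lo)))  # per-column rescale
    return sparse.vstack(out, format="csr")

x = csr_matrix(np.random.rand(2500, 4) * 10.0)
lo, hi = np.zeros(4), np.full(4, 10.0)
scaled = process_sparse_in_batches(x, lo, hi)
assert scaled.shape == (2500, 4)

As the original's TODO notes, the trade-off is lost sparsity (rescaling can map zeros to nonzero values) in exchange for bounded memory.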
ChenyangGao/python-epub3
epub3/epub.py
[ { "identifier": "File", "path": "epub3/util/file.py", "snippet": "class File:\n __slots__ = (\"path\", \"fs\", \"open\", \"open_modes\", \"_getattr\")\n ALL_MODES = frozenset(\"rwxab+\")\n\n def __init__(\n self, \n /, \n path=None, \n fs=None, \n open_modes=N...
import errno import io import os import os.path as ospath import posixpath from copy import deepcopy from datetime import datetime from fnmatch import translate as wildcard_translate from functools import cached_property, partial from inspect import getfullargspec, isclass from io import IOBase, TextIOWrapper from operator import methodcaller from os import fsdecode, remove, stat, stat_result, PathLike from pathlib import PurePosixPath from posixpath import join as joinpath, normpath from pprint import pformat from re import compile as re_compile, escape as re_escape, Pattern from shutil import copy, copyfileobj from typing import cast, Any, Callable, Container, Mapping, MutableMapping, Optional from types import MappingProxyType from uuid import uuid4 from warnings import warn from weakref import WeakKeyDictionary, WeakValueDictionary from urllib.parse import quote, unquote from zipfile import ZipFile, ZIP_STORED from .util.file import File, RootFS, TemporaryFS, OPEN_MODES from .util.helper import guess_media_type, values, items, sup from .util.proxy import proxy_property, ElementAttribProxy, ElementProxy, NAMESPACES from .util.remap import remap_links from .util.stream import PyLinq from .util.xml import el_add, el_del, el_iterfind, el_set from .util.undefined import undefined, UndefinedType from lxml.etree import fromstring, tostring, _Element as Element, _ElementTree as ElementTree # type: ignore from xml.etree.ElementTree import fromstring, tostring, Element, ElementTree # type: ignore
13,836
): if mode not in OPEN_MODES: raise ValueError(f"invalid open mode: {mode!r}") if isinstance(href, Item): if href not in self: raise LookupError(f"no such item: {href!r}") href = unquote(href["href"]) else: if isinstance(href, (bytes, PathLike)): href = fsdecode(href) else: href = str(href) assert (href := href.strip("/")), "empty href" href_to_file = self._href_to_file if href in self._href_to_id: if "x" in mode: raise FileExistsError(errno.EEXIST, f"file exists: {href!r}") file = href_to_file.get(href) uid = str(uuid4()) if file is None: href_to_file[href] = file = File(uid, self._workfs) elif not file.check_open_mode(mode): if "w" not in mode: try: fsrc = file.open("rb", buffering=0) except FileNotFoundError: if "r" in mode: raise else: with fsrc: copyfileobj(fsrc, self._workfs.open(uid, "wb")) href_to_file[href] = file = File(uid, self._workfs) elif "r" in mode: raise FileNotFoundError(errno.ENOENT, f"no such file: {href!r}") else: item = self.add(href) file = href_to_file[href] if "b" not in mode and encoding is None: encoding = "utf-8" return file.open( mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) def read(self, href, /, buffering=0): with self.open(href, "rb", buffering=buffering) as f: return f.read() read_bytes = read def read_text(self, href, /, encoding=None): with self.open(href, "r", encoding=encoding) as f: return f.read() def remove(self, href, /): if isinstance(href, Item): if href not in self: raise LookupError(f"no such item: {href!r}") href = unquote(href["href"]) else: if isinstance(href, (bytes, PathLike)): href = fsdecode(href) else: href = str(href) assert (href := href.strip("/")), "empty href" try: id = self._href_to_id.pop(href) except LookupError: raise FileNotFoundError(errno.ENOENT, f"no such file: {href!r}") item = super().pop(id, None) if item is not None: try: self._root.remove(item._root) except: pass file = self._href_to_file.pop(href, None) if file is not None and file.check_open_mode("w"): try: file.remove() except: pass def _rename(self, item, href, dest_href, /): try: id = self._href_to_id[dest_href] = self._href_to_id.pop(href) except LookupError: raise FileNotFoundError(errno.ENOENT, f"no such file: {href!r}") if item is None: item = super().__getitem__(id) item._attrib["href"] = quote(dest_href, safe=":/?&=#") self._href_to_file[dest_href] = self._href_to_file.pop(href, None) def rename(self, href, dest_href, /, repair=False): result = {} if isinstance(href, Item): item = href if item not in self: raise LookupError(f"no such item: {item!r}") href = unquote(item._attrib["href"]) else: if isinstance(href, (bytes, PathLike)): href = fsdecode(href) else: href = str(href) assert (href := href.strip("/")), "empty href" item = None if isinstance(dest_href, (bytes, PathLike)): dest_href = fsdecode(dest_href) else: dest_href = str(dest_href) assert (dest_href := dest_href.strip("/")), "empty href" result["pathpair"] = (href, dest_href) if href != dest_href: if dest_href in self._href_to_id: raise FileExistsError(errno.EEXIST, f"target file exists: {dest_href!r}") self._rename(item, href, dest_href) if repair:
#!/usr/bin/env python # coding: utf-8 __author__ = "ChenyangGao <https://chenyanggao.github.io>" __version__ = (0, 0, 1) __all__ = ["ePub", "Metadata", "DCTerm", "Meta", "Link", "Manifest", "Item", "Spine", "Itemref"] try: except ModuleNotFoundError: class DCTerm(ElementProxy): pass class Meta(ElementProxy): __protected_keys__ = ("property",) __optional_keys__ = ("dir", "id", "refines", "scheme", "xml:lang") class Link(ElementAttribProxy): __protected_keys__ = ("href", "rel") __optional_keys__ = ("hreflang", "id", "media-type", "properties", "refines") class Item(ElementAttribProxy): __const_keys__ = ("id",) __protected_keys__ = ("href", "media-type") __optional_keys__ = ("fallback", "media-overlay", "properties") __cache_get_state__ = lambda _, manifest: manifest def __init__(self, root: Element, manifest, /): super().__init__(root) self._manifest = manifest def __eq__(self, other, /): if type(self) is not type(other): return NotImplemented return self._manifest is other._manifest and self._attrib["href"] == other._attrib["href"] def __fspath__(self, /): return unquote(self._attrib["href"]) def __hash__(self, /): return hash((self._root, id(self._manifest))) def __setitem__(self, key, value, /): if key == "href": if value is None: raise ValueError("can't set href to None") self.rename(val) else: super().__setitem__(key, value) return self @property def filename(self, /): return PurePosixPath(joinpath(self.home, self)) @property def home(self, /): return PurePosixPath(self._manifest._epub._opf_dir) @property def name(self, /): return self.path.name @property def path(self, /): return PurePosixPath(self) @property def _parent(self, /): return posixpath.dirname(unquote(self._attrib["href"])) @property def parent(self, /): return self.path.parent @property def parents(self, /): return self.path.parents @property def parts(self, /): return self.path.parts @property def stem(self, /): return self.path.stem @property def suffix(self, /): return self.path.suffix @property def suffixes(self, /): return self.path.suffixes def update(self, attrib=None, /, **attrs): if attrib: attrib = dict(attrib) if attrs: attrib.update(attrs) else: attrib = attrs href = attrib.pop("href", None) if href: self.rename(href) if attrib: super().update(attrib) return self def is_relative_to(self, /, *other): return self.path.is_relative_to(*other) def joinpath(self, /, *others): return PurePosixPath(normpath(joinpath(self._parent, *others))) __truediv__ = joinpath def relpath(self, other, /): return PurePosixPath(posixpath.relpath(other, self._parent)) def relative_to(self, /, *other): return self.path.relative_to(*other) def with_name(self, /, name): return self.path.with_name(str(name)) def with_stem(self, /, stem): return self.path.with_stem(str(stem)) def with_suffix(self, /, suffix): return self.path.with_suffix(str(suffix)) def exists(self, /): return self._manifest.exists(self) def is_file(self, /): return self.exists() def is_dir(self, /): return False def is_symlink(self, /): return False def glob(self, /, pattern="*", ignore_case=False): return self._manifest.glob(pattern, self, ignore_case=ignore_case) def rglob(self, /, pattern="", ignore_case=False): return self._manifest.rglob(pattern, self, ignore_case=ignore_case) def iterdir(self, /): return self._manifest.iterdir(self) def match(self, /, path_pattern, ignore_case=False): path_pattern = path_pattern.strip("/") if not path_pattern: return False pattern = joinpath(*posix_glob_translate_iter(path_pattern)) if ignore_case: pattern = "(?i:%s)" % pattern 
return re_compile(pattern).fullmatch(self._attrib["href"]) is not None def open( self, /, mode="r", buffering=-1, encoding=None, errors=None, newline=None, ): return self._manifest.open( self, mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) def read(self, /, buffering=0): return self._manifest.read(self, buffering=buffering) read_bytes = read def read_text(self, /, encoding=None): return self._manifest.read_text(self, encoding=encoding) def remove(self, /): self._manifest.remove(self) return self def rename(self, dest_href, /, repair=False): return self._manifest.rename(self, dest_href, repair=repair) def batch_rename(self, mapper, /, predicate=None, repair=False): return self._manifest.batch_rename(self, mapper, predicate=predicate, repair=repair) def replace(self, href, /): self._manifest.replace(self, href) return self def stat(self, /) -> Optional[stat_result]: return self._manifest.stat(self) def touch(self, /): self._manifest.touch(self) return self unlink = remove def write(self, /, data): return self._manifest.write(self, data) write_bytes = write def write_text(self, /, text, encoding=None, errors=None, newline=None): return self._manifest.write_text(self, text, encoding=encoding, errors=errors, newline=newline) class Itemref(ElementAttribProxy): __const_keys__ = ("idref",) __optional_keys__ = ("id", "linear", "properties") @property def linear(self, /): return "no" if self._attrib.get("linear") == "no" else "yes" @linear.setter def linear(self, value, /): self._attrib["linear"] = "no" if value == "no" else "yes" class Metadata(ElementProxy): __wrap_class_map__ = {"{*}meta": Meta, "{*}": Link, "dc:*": DCTerm} def __repr__(self, /): return f"{super().__repr__()}\n{pformat(self.iter().list())}" @property def info(self, /): return tuple(meta.info for meta in self.iter()) def add( self, name: str = "meta", /, attrib: Optional[Mapping] = None, text: Optional[str] = None, tail: Any = undefined, **_disregards, ): return super().add(name, attrib=attrib, text=text) def dc( self, name: str, text_value: UndefinedType | Optional[str] = undefined, /, find_attrib: Optional[Mapping] = None, attrib: Optional[Mapping] = None, text: Optional[str] = None, merge: bool = False, delete: bool = False, auto_add: bool = False, ): if text_value is not undefined: if find_attrib: find_attrib = {**find_attrib, "": text_value} else: find_attrib = {"": text_value} return self.setfind( "dc:%s" % name, find_attrib=find_attrib, attrib=attrib, text=text, merge=merge, delete=delete, auto_add=auto_add, ) def meta( self, preds: str = "", /, find_attrib: Optional[Mapping] = None, attrib: Optional[Mapping] = None, text: Optional[str] = None, merge: bool = False, delete: bool = False, auto_add: bool = False, ): return self.setfind( "{*}meta%s" % preds, find_attrib=find_attrib, attrib=attrib, text=text, merge=merge, delete=delete, auto_add=auto_add, ) def name_meta( self, name, content: Optional[str] = None, /, find_attrib: Optional[Mapping] = None, attrib: Optional[Mapping] = None, text: Optional[str] = None, merge: bool = False, delete: bool = False, auto_add: bool = False, ): if find_attrib: find_attrib = {**find_attrib, "name": name} else: find_attrib = {"name": name} if content is not None: find_attrib["content"] = content return self.meta( find_attrib=find_attrib, attrib=attrib, text=text, merge=merge, delete=delete, auto_add=auto_add, ) def property_meta( self, property, text_value: UndefinedType | Optional[str] = undefined, /, find_attrib: Optional[Mapping] = None, attrib: 
Optional[Mapping] = None, text: Optional[str] = None, merge: bool = False, delete: bool = False, auto_add: bool = False, ): if find_attrib: find_attrib = {**find_attrib, "property": property} else: find_attrib = {"property": property} if text_value is not undefined: find_attrib[""] = text_value return self.meta( find_attrib=find_attrib, attrib=attrib, text=text, merge=merge, delete=delete, auto_add=auto_add, ) class ManifestProxy(ElementAttribProxy): __optional_keys__ = ("id",) class Manifest(dict[str, Item]): def __init__(self, /, root: Element, epub): self._root = root self._attrib = root.attrib self._epub = epub self._proxy = ManifestProxy(root) self._href_to_id: dict[str, str] = {} self._href_to_file: dict[str, File] = {} if len(root): href_to_id = self._href_to_id dangling_items = [] for item in root.iterfind("{*}item"): id = item.attrib.get("id") href = item.attrib.get("href") if id is None or not href: dangling_items.append(item) continue id = cast(str, id) href = cast(str, unquote(href)) super().__setitem__(id, Item(item, self)) href_to_id[href] = id if dangling_items: for item in reversed(dangling_items): root.remove(item) warn(f"removed a dangling item element: {item!r}") zfile = epub.__dict__.get("_zfile") opf_dir = epub._opf_dir if zfile: href_to_file = self._href_to_file for href in href_to_id: zpath = joinpath(opf_dir, href) zinfo = zfile.NameToInfo.get(zpath) if not zinfo or zinfo.is_dir(): warn(f"missing file in original epub: {href!r}") href_to_file[href] = File(str(uuid4()), self._workfs) else: href_to_file[href] = File(zpath, zfile, open_modes="r") def __init_subclass__(self, /, **kwargs): raise TypeError("subclassing is not allowed") def __call__(self, href, /): if isinstance(href, Item): if href not in self: raise LookupError(f"no such item: {href!r}") return href if isinstance(href, (bytes, PathLike)): href = fsdecode(href) else: href = str(href) assert (href := href.strip("/")), "empty href" try: id = self._href_to_id[href] except LookupError as e: raise FileNotFoundError(errno.ENOENT, f"no such file: {href!r}") from e return super().__getitem__(id) def __contains__(self, other, /): if isinstance(other, Item): return other._manifest is self and super().__contains__(other["id"]) return super().__contains__(other) def __delitem__(self, key, /): pop = self.pop if isinstance(key, int): el = self._root[key] try: id = el.attrib["id"] except AttributeError: try: self._root.remove(el) except: pass else: pop(id) elif isinstance(key, slice): root = self._root for el in root[key]: try: id = el.attrib["id"] except AttributeError: try: root.remove(el) except: pass else: pop(id, None) elif isinstance(key, Item): if key not in self: raise LookupError(f"no such item: {key!r}") pop(key["id"]) elif isinstance(key, str): pop(key) else: raise TypeError("`key` only accepts: `str`, `int`, `slice`, `Item`") return self def __getitem__(self, key, /): def wrap(el): try: if el.tag == "item" or el.tag.endswith("}item"): return Item(el, self) return ElementProxy(el) except AttributeError: return el if isinstance(key, int): return wrap(self._root[key]) elif isinstance(key, slice): return list(map(wrap, self._root[key])) elif isinstance(key, Item): if key not in self: raise LookupError(f"no such item: {key!r}") return key elif isinstance(key, str): return super().__getitem__(key) else: raise TypeError("`key` only accepts: `str`, `int`, `slice`, `Item`") def __setitem__(self, id, value, /): if id not in self: raise LookupError(f"no such item: {id!r}") if isinstance(id, Item): item = id else: item 
= super().__getitem__(id) href = unquote(item._attrib["href"]) if isinstance(value, str): self.rename(href, value) elif isinstance(value, bytes): self.write(href, value) elif isinstance(value, Mapping): if "open" in value and callable(value["open"]): self._href_to_file[href] = File(value, open_modes="rb") else: item.update(value) else: self._href_to_file[href] = File(value, open_modes="rb") return self @cached_property def _workfs(self, /): if self._epub._maketemp: return TemporaryFS(self._epub._workroot) else: return RootFS(self._epub._workroot) @cached_property def href_to_id(self, /): return MappingProxyType(self._href_to_id) @cached_property def href_to_file(self, /): return MappingProxyType(self._href_to_file) @property def home(self, /): return self._epub._opf_dir @property def attrib(self, /): return self._attrib @property def proxy(self, /): return self._proxy @property def info(self, /): return tuple(item.info for item in self.values()) delete = __delitem__ def clear(self, /): self._root.clear() self._href_to_file.clear() self._href_to_id.clear() super().clear() return self def pop(self, id, /, default=undefined): if id not in self: if default is undefined: raise LookupError(f"no such item: {id!r}") return default if isinstance(id, Item): id = id["id"] item = super().pop(id) try: self._root.remove(item._root) except: pass href = unquote(item._attrib["href"]) self._href_to_id.pop(href, None) file = self._href_to_file.pop(href, None) if file is not None and file.check_open_mode("w"): try: file.remove() except: pass return item def popitem(self, /): id, item = super().popitem() try: self._root.remove(item._root) except: pass href = unquote(item._attrib["href"]) self._href_to_id.pop(href, None) file = self._href_to_file.pop(href, None) if file is not None and file.check_open_mode("w"): try: file.remove() except: pass return id, item def set(self, id, value, /): if isinstance(id, Item): if id not in self: raise LookupError(f"no such item: {id!r}") item = id else: item = super().get(id) if item is None: if isinstance(value, str): item = self.add(href, id=id) elif isinstance(value, Mapping) and "href" in value: if "open" in value and callable(value["open"]): item = self.add(value["href"], value, id=id) else: item = self.add(value["href"], id=id, attrib=value) else: raise LookupError(f"no such item: {id!r}") else: href = unquote(item._attrib["href"]) if isinstance(value, str): self.rename(href, value) elif isinstance(value, bytes): self.write(href, value) elif isinstance(value, Mapping): if "open" in value and callable(value["open"]): self._href_to_file[href] = File(value, open_modes="rb") else: item.update(value) else: self._href_to_file[href] = File(value, open_modes="rb") return item def setdefault(self, id, value, /): if isinstance(id, Item): if id not in self: raise LookupError(f"no such item: {id!r}") item = id else: item = super().get(id) if item is None: if isinstance(value, str): item = self.add(value, id=id) elif isinstance(value, Mapping) and "href" in value: if "open" in value and callable(value["open"]): item = self.add(value["href"], value, id=id) else: item = self.add(value["href"], id=id, attrib=value) else: raise LookupError(f"no such item: {id!r}") else: if isinstance(value, Mapping) and not ("open" in value and callable(value["open"])): item.merge(value) return item def merge(self, id_or_attrib=None, /, **attrs): if attrs: if isinstance(id_or_attrib, Item): item = id_or_attrib if item not in self: raise LookupError(f"no such item: {item!r}") item.merge(attrib=attrs) 
elif isinstance(id_or_attrib, str): id = id_or_attrib item = super().get(id) if item is None: if "href" in attrs: href = attrs.pop("href") self.add(href, id=id, attrib=attrs) else: raise LookupError(f"no such item: {id!r}") else: item.merge(attrs) else: self._proxy.merge(id_or_attrib, **attrs) elif isinstance(id_or_attrib, Mapping): self._proxy.merge(id_or_attrib) return self def update(self, id_or_attrib=None, /, **attrs): if attrs: if isinstance(id_or_attrib, Item): item = id_or_attrib if item not in self: raise LookupError(f"no such item: {item!r}") item.update(attrib=attrs) elif isinstance(id_or_attrib, str): id = id_or_attrib item = super().get(id) if item is None: if "href" in attrs: href = attrs.pop("href") self.add(href, id=id, attrib=attrs) else: raise LookupError(f"no such item: {id!r}") else: item.update(attrs) else: self._proxy.update(id_or_attrib, **attrs) elif isinstance(id_or_attrib, Mapping): self._proxy.update(id_or_attrib) return self #################### SubElement Methods #################### @PyLinq.streamify def filter(self, /, predicate=None): if not callable(predicate): return iter(self.values()) return filter(predicate, self.values()) @PyLinq.streamify def filter_by_attr(self, predicate=None, attr="media-type", /): def activate_predicate(predicate): if predicate is None: return None if callable(predicate): return predicate elif isinstance(predicate, Pattern): return predicate.search elif isinstance(predicate, str): use_false = False if predicate.startswith(r"!"): use_false = True predicate = predicate[1:] predicate_startswith = predicate.startswith if predicate_startswith(r"="): predicate = predicate[1:].__eq__ elif predicate_startswith(r"~"): predicate = methodcaller("__contains__", predicate[1:]) elif predicate_startswith(r"^"): predicate = methodcaller("startswith", predicate[1:]) elif predicate_startswith(r"$"): predicate = methodcaller("endswith", predicate[1:]) elif predicate_startswith(r";"): predicate = lambda s, needle=predicate[1:]: needle in s.split() elif predicate_startswith(r","): predicate = lambda s, needle=predicate[1:]: needle in s.split(",") elif predicate_startswith(r"<"): predicate = re_compile(r"\b"+re_escape(predicate[1:])).search elif predicate_startswith(r">"): predicate = re_compile(re_escape(predicate[1:])+r"\b").search elif predicate_startswith(r"|"): predicate = re_compile(r"\b"+re_escape(predicate[1:])+r"\b").search elif predicate_startswith(r"*"): predicate = re_compile(wildcard_translate(predicate[1:])).fullmatch elif predicate_startswith(r"/"): predicate = re_compile(predicate[1:]).search elif predicate_startswith(r"%"): predicate = re_compile(predicate[1:]).fullmatch else: predicate = predicate.__eq__ if use_false: predicate = lambda s, _pred=predicate: not _pred(s) return predicate elif type(predicate) in (tuple, list): preds = tuple(pred for p in predicate if (pred:=activate_predicate(p)) is not None) if not preds: return None if type(predicate) is tuple: return lambda s, _preds=preds: any(p(s) for p in preds) else: return lambda s, _preds=preds: all(p(s) for p in preds) elif isinstance(predicate, Container): return predicate.__contains__ predicate = activate_predicate(predicate) if predicate is None: return filter(lambda item: attr in item, self.values()) return filter(lambda item: attr in item and predicate(item[attr]), self.values()) @PyLinq.streamify def iter(self, /): root = self._root for el in root.iterfind("*"): if not (el.tag == "item" or el.tag.endswith("}item")): yield ElementProxy(el) continue id = el.attrib.get("id") 
href = el.attrib.get("href") if not href: if id is None or not super().__contains__(id): try: root.remove(el) warn(f"removed a dangling item element: {el!r}") except: pass else: item = super().__getitem__(id) if item._root is not el: raise RuntimeError(f"different item elements {el!r} and {item._root!r} share the same id {id!r}") else: self.pop(id, None) warn(f"removed an item because of missing href attribute: {item!r}") continue href = unquote(href) if not el.attrib.get("media-type"): el.attrib["media-type"] = guess_media_type(href) if id is None: yield self.add(href) elif super().__contains__(id): item = super().__getitem__(id) if item._root is not el: raise RuntimeError(f"different item elements {el!r} and {item._root!r} share the same id {id!r}") yield item else: try: self._root.remove(el) warn(f"removed a dangling item element: {el!r}") except: pass def list(self, /, mapfn=None): if mapfn is None: return list(self.iter()) return list(map(mapfn, self.iter())) def audio_iter(self, /): return self.filter_by_attr("^audio/") def css_iter(self, /): return self.filter_by_attr("text/css") def font_iter(self, /): return self.filter_by_attr(("^font/", "^application/font-")) def image_iter(self, /): return self.filter_by_attr("^image/") def javascript_iter(self, /): return self.filter_by_attr(("text/javascript", "application/javascript", "application/ecmascript")) def media_iter(self, /): return self.filter_by_attr(("^audio/", "^image/", "^video/")) def text_iter(self, /): return self.filter_by_attr(("^text/", "$+xml")) def video_iter(self, /): return self.filter_by_attr("^video/") @PyLinq.streamify def html_item_ref_pair_iter(self, /): spine = self._epub.spine for id, itemref in spine.items(): yield self[id], itemref for item in self.filter_by_attr(("text/html", "application/xhtml+xml")): if item["id"] in spine: continue yield item, None #################### File System Methods #################### def add( self, href, /, file=None, fs=None, open_modes="r", id=None, media_type=None, attrib=None, ): if isinstance(href, Item): raise TypeError("can't directly add `Item` object") if isinstance(href, (bytes, PathLike)): href = fsdecode(href) else: href = str(href) assert (href := href.strip("/")), "empty href" if href in self._href_to_id: raise FileExistsError(errno.EEXIST, f"file exists: {href!r}") uid = str(uuid4()) if id is None: generate_id = self._epub._generate_id if generate_id is None: id = uid else: keys = self.keys() id = generate_id(href, keys) while id in keys: nid = generate_id(href, keys) if nid == id: i = sup(lambda i: f"{i}_{nid}" in keys) id = f"{i}_{nid}" break id = nid if id in self: raise LookupError(f"id already exists: {id!r}") attrib = dict(attrib) if attrib else {} attrib["id"] = id attrib["href"] = quote(href, safe=":/?&=#") if media_type: attrib["media-type"] = media_type if fs is not None: file = File(file, fs=fs, open_modes=open_modes) elif file is None: file = File(uid, self._workfs) elif isinstance(file, IOBase) or hasattr(file, "read") and not hasattr(file, "open"): file0 = file file = File(uid, self._workfs) test_data = file0.read(0) if test_data == b"": copyfileobj(file0, self._workfs.open(uid, "wb")) elif test_data == "": attrib.setdefault("media-type", "text/plain") copyfileobj(file0, self._workfs.open(uid, "w")) else: raise TypeError(f"incorrect read behavior: {file0!r}") else: file = File(file, open_modes=open_modes) if not attrib.get("media-type"): attrib["media-type"] = guess_media_type(href) item = Item(el_add(self._root, "item", attrib=attrib, 
namespaces=NAMESPACES), self) super().__setitem__(id, item) self._href_to_id[href] = id self._href_to_file[href] = file return item def change( self, href, /, file=None, fs=None, open_modes="r", id=None, media_type=None, attrib=None, ): if fs is self._workfs: raise OSError(errno.EINVAL, f"Remapping the file that in the working fs is not supported, use `rename` instead: {fs!r}") if href in self.href_to_id: item = self[self.href_to_id[href]] if attrib: item.update(attrib) if media_type: item.media_type = media_type try: self.href_to_file[href].remove() except: pass self._href_to_file[href] = File(file, fs, open_modes) return item else: return self.add( href, file=file, fs=fs, open_modes=open_modes, id=id, media_type=media_type, attrib=attrib, ) def exists(self, href, /): if isinstance(href, Item): return href in self if isinstance(href, (bytes, PathLike)): href = fsdecode(href) else: href = str(href) assert (href := href.strip("/")), "empty href" return href in self._href_to_id @PyLinq.streamify def glob(self, pattern="*", dirname="", ignore_case=False): pattern = pattern.strip("/") if not pattern: return if isinstance(dirname, Item): dirname = posixpath.dirname(unquote(href._attrib["href"])) else: dirname = dirname.strip("/") if dirname: dirname = re_escape(dirname) pattern = joinpath(dirname, *posix_glob_translate_iter(pattern)) if ignore_case: pattern = "(?i:%s)" % pattern matches = re_compile(pattern).fullmatch for href, id in self._href_to_id.items(): if not matches(href): continue try: yield super().__getitem__(id) except KeyError: pass @PyLinq.streamify def iterdir(self, /, dirname=""): if isinstance(dirname, Item): dirname = posixpath.dirname(unquote(href._attrib["href"])) else: dirname = dirname.strip("/") for href, id in self._href_to_id.items(): if posixpath.dirname(href) != dirname: continue try: yield super().__getitem__(id) except KeyError: pass def open( self, href, /, mode="r", buffering=-1, encoding=None, errors=None, newline=None, ): if mode not in OPEN_MODES: raise ValueError(f"invalid open mode: {mode!r}") if isinstance(href, Item): if href not in self: raise LookupError(f"no such item: {href!r}") href = unquote(href["href"]) else: if isinstance(href, (bytes, PathLike)): href = fsdecode(href) else: href = str(href) assert (href := href.strip("/")), "empty href" href_to_file = self._href_to_file if href in self._href_to_id: if "x" in mode: raise FileExistsError(errno.EEXIST, f"file exists: {href!r}") file = href_to_file.get(href) uid = str(uuid4()) if file is None: href_to_file[href] = file = File(uid, self._workfs) elif not file.check_open_mode(mode): if "w" not in mode: try: fsrc = file.open("rb", buffering=0) except FileNotFoundError: if "r" in mode: raise else: with fsrc: copyfileobj(fsrc, self._workfs.open(uid, "wb")) href_to_file[href] = file = File(uid, self._workfs) elif "r" in mode: raise FileNotFoundError(errno.ENOENT, f"no such file: {href!r}") else: item = self.add(href) file = href_to_file[href] if "b" not in mode and encoding is None: encoding = "utf-8" return file.open( mode=mode, buffering=buffering, encoding=encoding, errors=errors, newline=newline, ) def read(self, href, /, buffering=0): with self.open(href, "rb", buffering=buffering) as f: return f.read() read_bytes = read def read_text(self, href, /, encoding=None): with self.open(href, "r", encoding=encoding) as f: return f.read() def remove(self, href, /): if isinstance(href, Item): if href not in self: raise LookupError(f"no such item: {href!r}") href = unquote(href["href"]) else: if isinstance(href, 
(bytes, PathLike)): href = fsdecode(href) else: href = str(href) assert (href := href.strip("/")), "empty href" try: id = self._href_to_id.pop(href) except LookupError: raise FileNotFoundError(errno.ENOENT, f"no such file: {href!r}") item = super().pop(id, None) if item is not None: try: self._root.remove(item._root) except: pass file = self._href_to_file.pop(href, None) if file is not None and file.check_open_mode("w"): try: file.remove() except: pass def _rename(self, item, href, dest_href, /): try: id = self._href_to_id[dest_href] = self._href_to_id.pop(href) except LookupError: raise FileNotFoundError(errno.ENOENT, f"no such file: {href!r}") if item is None: item = super().__getitem__(id) item._attrib["href"] = quote(dest_href, safe=":/?&=#") self._href_to_file[dest_href] = self._href_to_file.pop(href, None) def rename(self, href, dest_href, /, repair=False): result = {} if isinstance(href, Item): item = href if item not in self: raise LookupError(f"no such item: {item!r}") href = unquote(item._attrib["href"]) else: if isinstance(href, (bytes, PathLike)): href = fsdecode(href) else: href = str(href) assert (href := href.strip("/")), "empty href" item = None if isinstance(dest_href, (bytes, PathLike)): dest_href = fsdecode(dest_href) else: dest_href = str(dest_href) assert (dest_href := dest_href.strip("/")), "empty href" result["pathpair"] = (href, dest_href) if href != dest_href: if dest_href in self._href_to_id: raise FileExistsError(errno.EEXIST, f"target file exists: {dest_href!r}") self._rename(item, href, dest_href) if repair:
result["repairs"] = remap_links(self, (href, dest_href))
12
2023-11-20 14:46:41+00:00
16k
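Manifest.glob above translates a posix-style wildcard into a regex via the repo's posix_glob_translate_iter helper, which this record does not include. A simplified stand-in that matches one path segment at a time with fnmatch.translate (so "*" cannot cross "/"; no "**" support), with case folding analogous to the "(?i:...)" wrapping in the original:

import re
from fnmatch import translate

def glob_match(pattern, href, ignore_case=False):
    """Match an href against a posix glob, one path segment at a time."""
    pparts = pattern.strip("/").split("/")
    hparts = href.strip("/").split("/")
    if len(pparts) != len(hparts):
        return False
    flags = re.IGNORECASE if ignore_case else 0
    return all(re.fullmatch(translate(p), h, flags) is not None
               for p, h in zip(pparts, hparts))

assert glob_match("OEBPS/*.xhtml", "OEBPS/ch01.xhtml")
assert glob_match("OEBPS/*.XHTML", "oebps/ch01.xhtml", ignore_case=True)
assert not glob_match("OEBPS/*.xhtml", "OEBPS/img/cover.jpg")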
ymp5078/AI-SAM
segment_anything/automatic_mask_generator.py
[ { "identifier": "Sam", "path": "segment_anything/modeling/sam.py", "snippet": "class Sam(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = \"RGB\"\n\n def __init__(\n self,\n image_encoder: ImageEncoderViT,\n prompt_encoder: PromptEncoder,\n mask_decode...
import numpy as np import torch import cv2 # type: ignore # noqa: F401 from torchvision.ops.boxes import batched_nms, box_area # type: ignore from typing import Any, Dict, List, Optional, Tuple from .modeling import Sam from .predictor import SamPredictor from .utils.amg import ( MaskData, area_from_rle, batch_iterator, batched_mask_to_box, box_xyxy_to_xywh, build_all_layer_point_grids, calculate_stability_score, coco_encode_rle, generate_crop_boxes, is_box_near_crop_edge, mask_to_rle_pytorch, remove_small_regions, rle_to_mask, uncrop_boxes_xyxy, uncrop_masks, uncrop_points, ) from pycocotools import mask as mask_utils # type: ignore # noqa: F401
10,971
ann = { "segmentation": mask_data["segmentations"][idx], "area": area_from_rle(mask_data["rles"][idx]), "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(), "predicted_iou": mask_data["iou_preds"][idx].item(), "point_coords": [mask_data["points"][idx].tolist()], "stability_score": mask_data["stability_score"][idx].item(), "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(), } curr_anns.append(ann) return curr_anns def _generate_masks(self, image: np.ndarray) -> MaskData: orig_size = image.shape[:2] crop_boxes, layer_idxs = generate_crop_boxes( orig_size, self.crop_n_layers, self.crop_overlap_ratio ) # Iterate over image crops data = MaskData() for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data["crop_boxes"]) scores = scores.to(data["boxes"].device) keep_by_nms = batched_nms( data["boxes"].float(), scores, torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], ) -> MaskData: # Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch( points, cropped_im_size, crop_box, orig_size ) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box) data["points"] = uncrop_points(data["points"], crop_box) data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))]) return data def _process_batch( self, points: np.ndarray, im_size: Tuple[int, ...], crop_box: List[int], orig_size: Tuple[int, ...], ) -> MaskData: orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, im_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones( in_points.shape[0], dtype=torch.int, device=in_points.device ) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], multimask_output=True, return_logits=True, ) # Serialize predictions and store in MaskData data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks # Filter by predicted IoU if self.pred_iou_thresh > 0.0: keep_mask = data["iou_preds"] > self.pred_iou_thresh data.filter(keep_mask) # Calculate stability score
# -*- coding: utf-8 -*- # Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. class SamAutomaticMaskGenerator: def __init__( self, model: Sam, points_per_side: Optional[int] = 32, points_per_batch: int = 64, pred_iou_thresh: float = 0.88, stability_score_thresh: float = 0.95, stability_score_offset: float = 1.0, box_nms_thresh: float = 0.7, crop_n_layers: int = 0, crop_nms_thresh: float = 0.7, crop_overlap_ratio: float = 512 / 1500, crop_n_points_downscale_factor: int = 1, point_grids: Optional[List[np.ndarray]] = None, min_mask_region_area: int = 0, output_mode: str = "binary_mask", ) -> None: """ Using a SAM model, generates masks for the entire image. Generates a grid of point prompts over the image, then filters low quality and duplicate masks. The default settings are chosen for SAM with a ViT-H backbone. Arguments: model (Sam): The SAM model to use for mask prediction. points_per_side (int or None): The number of points to be sampled along one side of the image. The total number of points is points_per_side**2. If None, 'point_grids' must provide explicit point sampling. points_per_batch (int): Sets the number of points run simultaneously by the model. Higher numbers may be faster but use more GPU memory. pred_iou_thresh (float): A filtering threshold in [0,1], using the model's predicted mask quality. stability_score_thresh (float): A filtering threshold in [0,1], using the stability of the mask under changes to the cutoff used to binarize the model's mask predictions. stability_score_offset (float): The amount to shift the cutoff when calculated the stability score. box_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks. crop_n_layers (int): If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops. crop_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks between different crops. crop_overlap_ratio (float): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. crop_n_points_downscale_factor (int): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. point_grids (list(np.ndarray) or None): A list over explicit grids of points used for sampling, normalized to [0,1]. The nth grid in the list is used in the nth crop layer. Exclusive with points_per_side. min_mask_region_area (int): If >0, postprocessing will be applied to remove disconnected regions and holes in masks with area smaller than min_mask_region_area. Requires opencv. output_mode (str): The form masks are returned in. Can be 'binary_mask', 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools. For large resolutions, 'binary_mask' may consume large amounts of memory. """ assert (points_per_side is None) != ( point_grids is None ), "Exactly one of points_per_side or point_grid must be provided." 
if points_per_side is not None: self.point_grids = build_all_layer_point_grids( points_per_side, crop_n_layers, crop_n_points_downscale_factor, ) elif point_grids is not None: self.point_grids = point_grids else: raise ValueError("Can't have both points_per_side and point_grid be None.") assert output_mode in [ "binary_mask", "uncompressed_rle", "coco_rle", ], f"Unknown output_mode {output_mode}." if output_mode == "coco_rle": if min_mask_region_area > 0: self.predictor = SamPredictor(model) self.points_per_batch = points_per_batch self.pred_iou_thresh = pred_iou_thresh self.stability_score_thresh = stability_score_thresh self.stability_score_offset = stability_score_offset self.box_nms_thresh = box_nms_thresh self.crop_n_layers = crop_n_layers self.crop_nms_thresh = crop_nms_thresh self.crop_overlap_ratio = crop_overlap_ratio self.crop_n_points_downscale_factor = crop_n_points_downscale_factor self.min_mask_region_area = min_mask_region_area self.output_mode = output_mode @torch.no_grad() def generate(self, image: np.ndarray) -> List[Dict[str, Any]]: """ Generates masks for the given image. Arguments: image (np.ndarray): The image to generate masks for, in HWC uint8 format. Returns: list(dict(str, any)): A list over records for masks. Each record is a dict containing the following keys: segmentation (dict(str, any) or np.ndarray): The mask. If output_mode='binary_mask', is an array of shape HW. Otherwise, is a dictionary containing the RLE. bbox (list(float)): The box around the mask, in XYWH format. area (int): The area in pixels of the mask. predicted_iou (float): The model's own prediction of the mask's quality. This is filtered by the pred_iou_thresh parameter. point_coords (list(list(float))): The point coordinates input to the model to generate this mask. stability_score (float): A measure of the mask's quality. This is filtered on using the stability_score_thresh parameter. crop_box (list(float)): The crop of the image used to generate the mask, given in XYWH format. 
""" # Generate masks mask_data = self._generate_masks(image) # Filter small disconnected regions and holes in masks if self.min_mask_region_area > 0: mask_data = self.postprocess_small_regions( mask_data, self.min_mask_region_area, max(self.box_nms_thresh, self.crop_nms_thresh), ) # Encode masks if self.output_mode == "coco_rle": mask_data["segmentations"] = [ coco_encode_rle(rle) for rle in mask_data["rles"] ] elif self.output_mode == "binary_mask": mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]] else: mask_data["segmentations"] = mask_data["rles"] # Write mask records curr_anns = [] for idx in range(len(mask_data["segmentations"])): ann = { "segmentation": mask_data["segmentations"][idx], "area": area_from_rle(mask_data["rles"][idx]), "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(), "predicted_iou": mask_data["iou_preds"][idx].item(), "point_coords": [mask_data["points"][idx].tolist()], "stability_score": mask_data["stability_score"][idx].item(), "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(), } curr_anns.append(ann) return curr_anns def _generate_masks(self, image: np.ndarray) -> MaskData: orig_size = image.shape[:2] crop_boxes, layer_idxs = generate_crop_boxes( orig_size, self.crop_n_layers, self.crop_overlap_ratio ) # Iterate over image crops data = MaskData() for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data["crop_boxes"]) scores = scores.to(data["boxes"].device) keep_by_nms = batched_nms( data["boxes"].float(), scores, torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], ) -> MaskData: # Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch( points, cropped_im_size, crop_box, orig_size ) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. 
keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box) data["points"] = uncrop_points(data["points"], crop_box) data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))]) return data def _process_batch( self, points: np.ndarray, im_size: Tuple[int, ...], crop_box: List[int], orig_size: Tuple[int, ...], ) -> MaskData: orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, im_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones( in_points.shape[0], dtype=torch.int, device=in_points.device ) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], multimask_output=True, return_logits=True, ) # Serialize predictions and store in MaskData data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks # Filter by predicted IoU if self.pred_iou_thresh > 0.0: keep_mask = data["iou_preds"] > self.pred_iou_thresh data.filter(keep_mask) # Calculate stability score
data["stability_score"] = calculate_stability_score(
8
2023-11-26 23:42:53+00:00
16k
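The row above crops segment-anything's automatic mask generator right before the gold next line (`data["stability_score"] = calculate_stability_score(`). As a reading aid, here is a minimal usage sketch of the class the row defines; the checkpoint filename, registry key, and image path are placeholders, not part of the row:

# Hedged sketch: driving SamAutomaticMaskGenerator end to end.
# Assumes the segment-anything package and a local ViT-H checkpoint;
# "sam_vit_h.pth" and "example.jpg" are placeholder paths.
import cv2
from segment_anything import sam_model_registry, SamAutomaticMaskGenerator

sam = sam_model_registry["vit_h"](checkpoint="sam_vit_h.pth")
generator = SamAutomaticMaskGenerator(
    sam,
    points_per_side=32,           # prompt grid density: 32**2 points total
    pred_iou_thresh=0.88,         # IoU filter applied just before the gold next_line
    stability_score_thresh=0.95,  # filter computed from the stability scores
)

image = cv2.cvtColor(cv2.imread("example.jpg"), cv2.COLOR_BGR2RGB)  # HWC uint8
masks = generator.generate(image)
for m in masks[:3]:
    # keys match the generate() docstring in the row above
    print(m["bbox"], m["area"], round(m["predicted_iou"], 3), round(m["stability_score"], 3))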
sophiaalthammer/alforrankers
matchmaker/utils/input_pipeline.py
[ { "identifier": "ConditionalQueryGenerationInferenceReader", "path": "matchmaker/dataloaders/query_generation_inference_loader.py", "snippet": "class ConditionalQueryGenerationInferenceReader(DatasetReader):\n \"\"\"\n Read a tsv file containing a passage collection.\n \n Expected format for...
import torch import numpy import random import torch.multiprocessing as mp from allennlp.data.samplers import BucketBatchSampler, MaxTokensBatchSampler from allennlp.data.vocabulary import Vocabulary from allennlp.data.data_loaders import MultiProcessDataLoader from transformers import T5Tokenizer from allennlp.data.token_indexers import PretrainedTransformerIndexer from allennlp.data.tokenizers import PretrainedTransformerTokenizer from matchmaker.dataloaders.concatenated_reranking_loader import * from matchmaker.dataloaders.concatenated_training_loader import * from matchmaker.dataloaders.independent_reranking_loader import * from matchmaker.dataloaders.independent_training_loader import * from matchmaker.dataloaders.id_sequence_loader import * from matchmaker.dataloaders.mlm_masked_sequence_loader import * from matchmaker.dataloaders.query_generation_inference_loader import ConditionalQueryGenerationInferenceReader from matchmaker.dataloaders.tas_balanced_training_loader import * from matchmaker.dataloaders.pseudo_label_training_loader import PseudoLabelDatasetLoader, PseudoLabelTextDatasetLoader from matchmaker.dataloaders.triple_id_training_loader import TripleIdDatasetLoader from transformers import AutoTokenizer from matchmaker.dataloaders.bling_fire_tokenizer import BlingFireTokenizer from matchmaker.dataloaders.transformer_tokenizer import FastTransformerTokenizer from matchmaker.modules.bert_embedding_token_embedder import PretrainedBertIndexerNoSpecialTokens from typing import Dict, Tuple, List
12,133
batch_size=int(run_config["batch_size_train"]), tokenizer=_tokenizer, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], random_seed =run_config["random_seed"],concatenate_sequences = model_config.get("model_input_type", "") == "concatenated") elif run_config["dynamic_sampler_type"] == "triple_ids": loader = TripleIdDatasetLoader(query_file=run_config["dynamic_query_file"], collection_file=run_config["dynamic_collection_file"], triples_with_teacher_scores=run_config["dynamic_triples_with_teacher_scores"], batch_size=int(run_config["batch_size_train"]), tokenizer=_tokenizer, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], random_seed =run_config["random_seed"],concatenate_sequences = model_config.get("model_input_type", "") == "concatenated") elif run_config["dynamic_sampler_type"] == "mlm_pretrain": loader = MLMDatasetLoader(collection_file=run_config["train_tsv"], batch_size=int(run_config["batch_size_train"]), tokenizer=_tokenizer, max_doc_length=run_config["max_doc_length"], random_seed=run_config["random_seed"], min_doc_length=-1, mlm_mask_whole_words=True, mask_probability=run_config["mask_probability"], mlm_mask_replace_probability=run_config["mlm_mask_replace_probability"], mlm_mask_random_probability=run_config["mlm_mask_random_probability"], whole_word_masking=run_config["whole_word_masking"], random_spans=run_config["random_spans"], tasb=run_config["tasb"], tasb_cluster_file=run_config["tasb_cluster_file"], tasb_weight=run_config["tasb_weight"], grad_acc=run_config["gradient_accumulation_steps"], cached_chunk_size=int(run_config["batch_size_train"])/int(run_config["cache_chunk_size"])) else: raise ConfigurationError("dynamic sampler type not supported") return loader def allennlp_reranking_inference_loader(model_config, run_config, _input_file): ''' Load examples from a .tsv file in the reranking candidate file format: q_id<tab>d_id<tab>q_text<tab>d_text (Using allennlp's v2 multiprocess loader) ''' _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, max(run_config["max_doc_length"], run_config["max_query_length"])) if model_config.get("model_input_type", "") == "concatenated" or model_config["token_embedder_type"] == "bert_cat": reader = ConcatenatedReRankingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config["min_doc_length"], min_query_length=run_config["min_query_length"], train_qa_spans=run_config["train_qa_spans"]) else: reader = IndependentReRankingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config.get("min_doc_length",-1), min_query_length=run_config.get("min_query_length",-1), query_augment_mask_number=run_config.get("query_augment_mask_number",-1), train_qa_spans=run_config.get("train_qa_spans",False)) loader = MultiProcessDataLoader(reader, data_path=_input_file, num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(run_config["batch_size_eval"])*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_sampler=MaxTokensBatchSampler(max_tokens=int(run_config["batch_size_eval"])*run_config["max_doc_length"], sorting_keys=["doc_tokens"], padding_noise=0)) loader.index_with(_vocab) return loader def 
allennlp_query_gen_train_loader(model_config, run_config, _input_file): ''' Load examples from a .tsv file in the reranking candidate file format: q_id<tab>d_id<tab>q_text<tab>d_text (Using allennlp's v2 multiprocess loader) ''' _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, max(run_config["max_doc_length"], run_config["max_query_length"])) reader = IndependentReRankingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config.get("min_doc_length",-1), min_query_length=run_config.get("min_query_length",-1), query_augment_mask_number=run_config.get("query_augment_mask_number",-1), train_qa_spans=run_config.get("train_qa_spans",False)) loader = MultiProcessDataLoader(reader, data_path=_input_file, num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(run_config["batch_size_train"])*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_size=run_config["batch_size_train"]) loader.index_with(_vocab) return loader def allennlp_query_gen_inference_loader(model_config, run_config, _input_file,): ''' Load examples from a .tsv file in the single sequence format: id<tab>text and augment it with conditional query codes (Using allennlp's v2 multiprocess loader) ''' _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, run_config["max_doc_length"]) max_length = model_config["max_doc_length"] batch_size = run_config["collection_batch_size"] reader = ConditionalQueryGenerationInferenceReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=max_length, target_distribution_file=run_config["target_distribution_file"], target_number_of_queries_total=run_config["target_number_of_queries_total"]) loader = MultiProcessDataLoader(reader, data_path=_input_file, num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(batch_size)*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_sampler=MaxTokensBatchSampler(max_tokens=int(batch_size)*max_length, sorting_keys=["doc_tokens"], padding_noise=0)) loader.index_with(_vocab) return loader def _get_indexer(model_config, max_length): # default values _tokenizer = BlingFireTokenizer() _vocab = Vocabulary() if model_config["token_embedder_type"] == "embedding": _token_indexers = {"tokens": SingleIdTokenIndexer(lowercase_tokens=True)} _vocab = Vocabulary.from_files(model_config["vocab_directory"]) elif model_config["token_embedder_type"] == "bert_embedding" or model_config["token_embedder_type"] == "bert_vectors": _tokenizer = PretrainedTransformerTokenizer(model_config["bert_pretrained_model"], do_lowercase=True, start_tokens=[], end_tokens=[])
#from tokenizers import ByteLevelBPETokenizer,CharBPETokenizer #from matchmaker.dataloaders.transformer_tokenizer import CustomTransformerTokenizer,CustomTransformerIndexer mp.set_sharing_strategy("file_system") # VERY MUCH needed for linux !! makes everything faster, but tends to break stuff def allennlp_single_sequence_loader(model_config, run_config, _input_file, sequence_type, force_exact_batch_size=False): ''' Load examples from a .tsv file in the single sequence format: id<tab>text (Using allennlp's v2 multiprocess loader) ''' if model_config.get("model_input_type", "") == "mlm": sequence_type == "single_mlm" if sequence_type == "query": max_length = run_config.get("overwrite_max_query_length", model_config["max_query_length"]) min_length = model_config.get("min_query_length",-1) batch_size = run_config["query_batch_size"] split_document=False split_document_window_size=-1 if sequence_type == "single_mlm": max_length = run_config.get("overwrite_max_doc_length", model_config["max_doc_length"]) min_length = model_config.get("min_doc_length", -1) batch_size = run_config.get("collection_batch_size", run_config["batch_size_train"]) make_multiple_of=run_config.get("make_multiple_of",8) mask_probability=run_config.get("mask_probability",0.1) mlm_mask_replace_probability=run_config.get("mlm_mask_replace_probability",0.5) mlm_mask_random_probability=run_config.get("mlm_mask_random_probability",0.5) else: # doc max_length = run_config.get("overwrite_max_doc_length", model_config["max_doc_length"]) min_length = model_config.get("min_doc_length",-1) batch_size = run_config["collection_batch_size"] split_document=run_config.get("split_document",False) split_document_window_size=run_config.get("split_document_window_size",-1) _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, max_length) #if model_config.get("model_input_type", "") == "mlm": # reader = MLMMaskedSequenceDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, # max_doc_length=max_length, min_doc_length=min_length, # mask_probability=mask_probability, # mlm_mask_replace_probability=mlm_mask_replace_probability, # mlm_mask_random_probability=mlm_mask_random_probability, # make_multiple_of=make_multiple_of) reader = IdSequenceDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, split_document=split_document,split_document_window_size=split_document_window_size, max_seq_length=max_length, min_seq_length=min_length, sequence_type=sequence_type) if force_exact_batch_size: loader = MultiProcessDataLoader(reader, data_path=_input_file, num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(batch_size)*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_size=int(batch_size)) else: loader = MultiProcessDataLoader(reader, data_path=_input_file, num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(batch_size)*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_sampler=MaxTokensBatchSampler(max_tokens=int(batch_size)*max_length, sorting_keys=["seq_tokens"], padding_noise=0)) loader.index_with(_vocab) return loader def allennlp_triple_training_loader(model_config, run_config, _input_file,add_text_to_batch=False): ''' Load training examples (either in the re-ranking text file format or a dynamic loader) (Using allennlp's v2 multiprocess loader) ''' _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, max(run_config["max_doc_length"], 
run_config["max_query_length"])) if run_config.get("dynamic_sampler", False) == False: if model_config.get("model_input_type", "") == "concatenated" or model_config["token_embedder_type"] == "bert_cat": reader = ConcatenatedTrainingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config["min_doc_length"], min_query_length=run_config["min_query_length"], data_augment=run_config["train_data_augment"], train_pairwise_distillation=run_config["train_pairwise_distillation"], train_qa_spans=run_config["train_qa_spans"],add_text_to_batch=add_text_to_batch) else: reader = IndependentTrainingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config["min_doc_length"], min_query_length=run_config["min_query_length"], data_augment=run_config["train_data_augment"], train_pairwise_distillation=run_config["train_pairwise_distillation"], query_augment_mask_number=run_config["query_augment_mask_number"], train_qa_spans=run_config["train_qa_spans"],add_text_to_batch=add_text_to_batch) loader = MultiProcessDataLoader(reader, data_path=_input_file, num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(run_config["batch_size_train"])*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_size=run_config["batch_size_train"]) loader.index_with(_vocab) else: #if run_config["dynamic_sampler_type"] == "list": # loader = IrDynamicTripleDatasetLoader(query_file=run_config["dynamic_query_file"], collection_file=run_config["dynamic_collection_file"], # qrels_file=run_config["dynamic_qrels_file"], candidate_file=run_config["dynamic_candidate_file"], # batch_size=int(run_config["batch_size_train"]), queries_per_batch=run_config["dynamic_queries_per_batch"], tokenizer=_tokenizer, token_indexers=_token_indexers, # max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], # min_doc_length=run_config["min_doc_length"], min_query_length=run_config["min_query_length"], # data_augment=run_config["train_data_augment"], vocab=_vocab) if run_config["dynamic_sampler_type"] == "tas_balanced": loader = TASBalancedDatasetLoader(query_file=run_config["dynamic_query_file"], collection_file=run_config["dynamic_collection_file"], pairs_with_teacher_scores=run_config["dynamic_pairs_with_teacher_scores"], query_cluster_file=run_config["dynamic_query_cluster_file"], batch_size=int(run_config["batch_size_train"]), clusters_per_batch=run_config["dynamic_clusters_per_batch"], tokenizer=_tokenizer, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], pair_balancing_strategy=run_config["tas_balanced_pair_strategy"],random_seed =run_config["random_seed"]) elif run_config["dynamic_sampler_type"] == "pseudo_label": loader = PseudoLabelDatasetLoader(query_file=run_config["dynamic_query_file"], collection_file=run_config["dynamic_collection_file"], rankings_with_teacher_scores=run_config["dynamic_rankings_with_teacher_scores"], selection_type=run_config["pseudo_label_selection_type"],min_pos_score=run_config["pseudo_label_min_pos_score"], max_diff_to_be_pos=run_config["pseudo_label_max_diff_to_be_pos"],min_diff_to_neg=run_config["pseudo_label_min_diff_to_neg"], batch_size=int(run_config["batch_size_train"]), tokenizer=_tokenizer, 
max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], random_seed =run_config["random_seed"],concatenate_sequences = model_config.get("model_input_type", "") == "concatenated") elif run_config["dynamic_sampler_type"] == "pseudo_labeltext": loader = PseudoLabelTextDatasetLoader(rankings_with_teacher_scores=run_config["dynamic_rankings_with_teacher_scores"], batch_size=int(run_config["batch_size_train"]), tokenizer=_tokenizer, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], random_seed =run_config["random_seed"],concatenate_sequences = model_config.get("model_input_type", "") == "concatenated") elif run_config["dynamic_sampler_type"] == "triple_ids": loader = TripleIdDatasetLoader(query_file=run_config["dynamic_query_file"], collection_file=run_config["dynamic_collection_file"], triples_with_teacher_scores=run_config["dynamic_triples_with_teacher_scores"], batch_size=int(run_config["batch_size_train"]), tokenizer=_tokenizer, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], random_seed =run_config["random_seed"],concatenate_sequences = model_config.get("model_input_type", "") == "concatenated") elif run_config["dynamic_sampler_type"] == "mlm_pretrain": loader = MLMDatasetLoader(collection_file=run_config["train_tsv"], batch_size=int(run_config["batch_size_train"]), tokenizer=_tokenizer, max_doc_length=run_config["max_doc_length"], random_seed=run_config["random_seed"], min_doc_length=-1, mlm_mask_whole_words=True, mask_probability=run_config["mask_probability"], mlm_mask_replace_probability=run_config["mlm_mask_replace_probability"], mlm_mask_random_probability=run_config["mlm_mask_random_probability"], whole_word_masking=run_config["whole_word_masking"], random_spans=run_config["random_spans"], tasb=run_config["tasb"], tasb_cluster_file=run_config["tasb_cluster_file"], tasb_weight=run_config["tasb_weight"], grad_acc=run_config["gradient_accumulation_steps"], cached_chunk_size=int(run_config["batch_size_train"])/int(run_config["cache_chunk_size"])) else: raise ConfigurationError("dynamic sampler type not supported") return loader def allennlp_reranking_inference_loader(model_config, run_config, _input_file): ''' Load examples from a .tsv file in the reranking candidate file format: q_id<tab>d_id<tab>q_text<tab>d_text (Using allennlp's v2 multiprocess loader) ''' _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, max(run_config["max_doc_length"], run_config["max_query_length"])) if model_config.get("model_input_type", "") == "concatenated" or model_config["token_embedder_type"] == "bert_cat": reader = ConcatenatedReRankingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config["min_doc_length"], min_query_length=run_config["min_query_length"], train_qa_spans=run_config["train_qa_spans"]) else: reader = IndependentReRankingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config.get("min_doc_length",-1), min_query_length=run_config.get("min_query_length",-1), query_augment_mask_number=run_config.get("query_augment_mask_number",-1), train_qa_spans=run_config.get("train_qa_spans",False)) loader = MultiProcessDataLoader(reader, data_path=_input_file, 
num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(run_config["batch_size_eval"])*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_sampler=MaxTokensBatchSampler(max_tokens=int(run_config["batch_size_eval"])*run_config["max_doc_length"], sorting_keys=["doc_tokens"], padding_noise=0)) loader.index_with(_vocab) return loader def allennlp_query_gen_train_loader(model_config, run_config, _input_file): ''' Load examples from a .tsv file in the reranking candidate file format: q_id<tab>d_id<tab>q_text<tab>d_text (Using allennlp's v2 multiprocess loader) ''' _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, max(run_config["max_doc_length"], run_config["max_query_length"])) reader = IndependentReRankingDatasetReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=run_config["max_doc_length"], max_query_length=run_config["max_query_length"], min_doc_length=run_config.get("min_doc_length",-1), min_query_length=run_config.get("min_query_length",-1), query_augment_mask_number=run_config.get("query_augment_mask_number",-1), train_qa_spans=run_config.get("train_qa_spans",False)) loader = MultiProcessDataLoader(reader, data_path=_input_file, num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(run_config["batch_size_train"])*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_size=run_config["batch_size_train"]) loader.index_with(_vocab) return loader def allennlp_query_gen_inference_loader(model_config, run_config, _input_file,): ''' Load examples from a .tsv file in the single sequence format: id<tab>text and augment it with conditional query codes (Using allennlp's v2 multiprocess loader) ''' _tokenizer, _token_indexers, _vocab = _get_indexer(model_config, run_config["max_doc_length"]) max_length = model_config["max_doc_length"] batch_size = run_config["collection_batch_size"] reader = ConditionalQueryGenerationInferenceReader(tokenizer=_tokenizer, token_indexers=_token_indexers, max_doc_length=max_length, target_distribution_file=run_config["target_distribution_file"], target_number_of_queries_total=run_config["target_number_of_queries_total"]) loader = MultiProcessDataLoader(reader, data_path=_input_file, num_workers=run_config["dataloader_num_workers"], max_instances_in_memory=int(batch_size)*25, quiet=True, start_method="fork" if "fork" in mp.get_all_start_methods() else "spawn", batch_sampler=MaxTokensBatchSampler(max_tokens=int(batch_size)*max_length, sorting_keys=["doc_tokens"], padding_noise=0)) loader.index_with(_vocab) return loader def _get_indexer(model_config, max_length): # default values _tokenizer = BlingFireTokenizer() _vocab = Vocabulary() if model_config["token_embedder_type"] == "embedding": _token_indexers = {"tokens": SingleIdTokenIndexer(lowercase_tokens=True)} _vocab = Vocabulary.from_files(model_config["vocab_directory"]) elif model_config["token_embedder_type"] == "bert_embedding" or model_config["token_embedder_type"] == "bert_vectors": _tokenizer = PretrainedTransformerTokenizer(model_config["bert_pretrained_model"], do_lowercase=True, start_tokens=[], end_tokens=[])
_ind = PretrainedBertIndexerNoSpecialTokens(pretrained_model=model_config["bert_pretrained_model"], do_lowercase=True, max_pieces=max_length)
6
2023-11-21 10:38:22+00:00
16k
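This row's input_pipeline.py exposes factory functions that wire AllenNLP v2 multiprocess loaders from two plain config dicts. Below is a hedged sketch of calling the re-ranking inference loader shown above; the file paths and concrete values are assumptions, and only the config keys actually read by the snippet are taken from the row:

# Hedged sketch: building the re-ranking inference loader from the row above.
# "data/vocab" and "data/rerank.tsv" are placeholders; rerank.tsv rows follow
# the documented format q_id<tab>d_id<tab>q_text<tab>d_text.
from matchmaker.utils.input_pipeline import allennlp_reranking_inference_loader

model_config = {
    "token_embedder_type": "embedding",  # -> SingleIdTokenIndexer + vocab files
    "vocab_directory": "data/vocab",
    # no "model_input_type": falls through to the independent (dual-encoder) reader
}
run_config = {
    "max_doc_length": 200,
    "max_query_length": 30,
    "batch_size_eval": 256,
    "dataloader_num_workers": 2,
}

loader = allennlp_reranking_inference_loader(model_config, run_config, "data/rerank.tsv")
for batch in loader:  # MaxTokensBatchSampler yields roughly token-balanced batches
    print(batch.keys())
    break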
MICLab-Unicamp/medpseg
medpseg/poly_pipeline.py
[ { "identifier": "PolySeg2DModule", "path": "medpseg/poly_seg_2d_module.py", "snippet": "class PolySeg2DModule(pl.LightningModule):\n '''\n Regarding of the name, also works with 3D networks\n '''\n def __init__(self, hparams):\n '''\n Check starter.py for description of all hpa...
import os import torch import numpy as np import cc3d import SimpleITK as sitk from medpseg.poly_seg_2d_module import PolySeg2DModule from medpseg.eval_2d_utils import E2DStackDataset, argon_cpu_count from torch.nn import functional as F from tqdm import tqdm from collections import defaultdict from operator import itemgetter from typing import Dict, Optional from multiprocessing import Queue
10,858
''' Copyright (c) Diedre Carmo, Medical Imaging Computing Lab (MICLab) https://miclab.fee.unicamp.br/ https://github.com/MICLab-Unicamp/medpseg All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. Independent script Updated pipeline using a single weight ''' def get_connected_components(volume, return_largest=2, verbose=False): ''' volume: input volume return_largest: how many of the largest labels to return. If 0, nothing is changed in input volume verbose: prints label_count returns: filtered_volume, label_count, labeled_volume ''' labels_out = cc3d.connected_components(volume.astype(np.int32)) label_count = np.unique(labels_out, return_counts=True)[1] # Indicate which was the original label and sort by count label_count = [(label, count) for label, count in enumerate(label_count)] label_count.sort(key=itemgetter(1), reverse=True) label_count.pop(0) # remove largest which should be background if verbose: print(f"Label count: {label_count}") filtered = None if return_largest > 0: for i in range(return_largest): try: id_max = label_count[i][0] if filtered is None: filtered = (labels_out == id_max) else: filtered += (labels_out == id_max) except IndexError: # We want more components that what is in the image, stop break volume = filtered * volume labels_out = filtered * labels_out return volume, label_count, labels_out class PrintInterface(): def __init__(self, tqdm_iter): self.tqdm_iter = tqdm_iter self.rot90 = False def write(self, x): self.tqdm_iter.put(("write", x)) def progress(self, x): self.tqdm_iter.put(("iterbar", x)) def image_to_front_end(self, x): if self.rot90: x = np.rot90(x, k=2, axes=(0, 1)) self.tqdm_iter.put(("slice", x)) def icon(self): self.tqdm_iter.put(("icon", '')) def poly_stack_predict(model: torch.nn.Module, volume: torch.Tensor, batch_size: int, device=torch.device("cuda:0"), info_q: Optional[Queue] = None, uncertainty: Optional[int] = None): ''' DEVING uncertainty: epistemic uncerainty, predict n times and return the mean and std prediction '''
''' Copyright (c) Diedre Carmo, Medical Imaging Computing Lab (MICLab) https://miclab.fee.unicamp.br/ https://github.com/MICLab-Unicamp/medpseg All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. Independent script Updated pipeline using a single weight ''' def get_connected_components(volume, return_largest=2, verbose=False): ''' volume: input volume return_largest: how many of the largest labels to return. If 0, nothing is changed in input volume verbose: prints label_count returns: filtered_volume, label_count, labeled_volume ''' labels_out = cc3d.connected_components(volume.astype(np.int32)) label_count = np.unique(labels_out, return_counts=True)[1] # Indicate which was the original label and sort by count label_count = [(label, count) for label, count in enumerate(label_count)] label_count.sort(key=itemgetter(1), reverse=True) label_count.pop(0) # remove largest which should be background if verbose: print(f"Label count: {label_count}") filtered = None if return_largest > 0: for i in range(return_largest): try: id_max = label_count[i][0] if filtered is None: filtered = (labels_out == id_max) else: filtered += (labels_out == id_max) except IndexError: # We want more components that what is in the image, stop break volume = filtered * volume labels_out = filtered * labels_out return volume, label_count, labels_out class PrintInterface(): def __init__(self, tqdm_iter): self.tqdm_iter = tqdm_iter self.rot90 = False def write(self, x): self.tqdm_iter.put(("write", x)) def progress(self, x): self.tqdm_iter.put(("iterbar", x)) def image_to_front_end(self, x): if self.rot90: x = np.rot90(x, k=2, axes=(0, 1)) self.tqdm_iter.put(("slice", x)) def icon(self): self.tqdm_iter.put(("icon", '')) def poly_stack_predict(model: torch.nn.Module, volume: torch.Tensor, batch_size: int, device=torch.device("cuda:0"), info_q: Optional[Queue] = None, uncertainty: Optional[int] = None): ''' DEVING uncertainty: epistemic uncerainty, predict n times and return the mean and std prediction '''
e2d_stack_dataloader = E2DStackDataset(volume, extended_2d=1).get_dataloader(batch_size=batch_size, pin_memory=False, num_workers=argon_cpu_count())
2
2023-11-21 20:03:33+00:00
16k
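The medpseg row centers on get_connected_components, a small cc3d wrapper that keeps the N largest foreground components and drops the rest. A toy sanity check of that helper, with purely illustrative shapes and sizes:

# Hedged sketch: exercising get_connected_components from the row above
# on a synthetic binary volume. The import path mirrors the row's file_path.
import numpy as np
from medpseg.poly_pipeline import get_connected_components

volume = np.zeros((32, 32, 32), dtype=np.uint8)
volume[2:6, 2:6, 2:6] = 1         # component A: 64 voxels
volume[20:30, 20:30, 20:30] = 1   # component B: 1000 voxels
volume[10, 10, 10] = 1            # 1-voxel speckle, should be filtered out

kept, label_count, labels_out = get_connected_components(volume, return_largest=2, verbose=True)
# label_count is sorted by size with the background entry popped off,
# so return_largest=2 keeps B and A and discards the speckle.
assert int(kept.sum()) == 1064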
DLYuanGod/TinyGPT-V
minigpt4/datasets/builders/image_text_pair_builder.py
[ { "identifier": "registry", "path": "minigpt4/common/registry.py", "snippet": "class Registry:\n def register_builder(cls, name):\n def wrap(builder_cls):\n def register_task(cls, name):\n def wrap(task_cls):\n def register_model(cls, name):\n def wrap(model_cls):\n def ...
import os import logging import warnings from minigpt4.common.registry import registry from minigpt4.datasets.builders.base_dataset_builder import BaseDatasetBuilder from minigpt4.datasets.datasets.laion_dataset import LaionDataset from minigpt4.datasets.datasets.cc_sbu_dataset import CCSBUDataset, CCSBUAlignDataset from minigpt4.datasets.datasets.text_caps import TextCapDataset from minigpt4.datasets.datasets.llava_dataset import LlavaDetailDataset, LlavaReasonDataset, LlavaConversationDataset from minigpt4.datasets.datasets.unnatural_instruction import UnnaturalDataset from minigpt4.datasets.datasets.multitask_conversation import MultiTaskConversationDataset from minigpt4.datasets.datasets.flickr import GroundedDetailDataset,CaptionToObjectDataset,PhraseToObjectDataset from minigpt4.datasets.datasets.vg_dataset import ReferVisualGenomeDataset from minigpt4.datasets.datasets.coco_dataset import ReferCOCODataset, InvReferCOCODataset from minigpt4.datasets.datasets.gqa_datasets import GQADataset from minigpt4.datasets.datasets.aok_vqa_datasets import AOKVQADataset from minigpt4.datasets.datasets.coco_vqa_datasets import COCOVQADataset from minigpt4.datasets.datasets.ocrvqa_dataset import OCRVQADataset from minigpt4.datasets.datasets.coco_caption import COCOCapDataset
11,982
DATASET_CONFIG_DICT = {"default": "configs/datasets/aokvqa/defaults.yaml"} @registry.register_builder("gqa") class GQABuilder(BaseDatasetBuilder): train_dataset_cls = GQADataset DATASET_CONFIG_DICT = { "default": "configs/datasets/gqa/balanced_val.yaml", } @registry.register_builder("flickr_grounded_caption") class GroundedCaptionBuilder(BaseDatasetBuilder): train_dataset_cls = GroundedDetailDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/flickr/default.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets @registry.register_builder("flickr_CaptionToPhrase") class CaptionToPhraseBuilder(BaseDatasetBuilder): train_dataset_cls = CaptionToObjectDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/flickr/caption_to_phrase.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets @registry.register_builder("flickr_ObjectToPhrase") class CaptionToPhraseBuilder(BaseDatasetBuilder): train_dataset_cls = PhraseToObjectDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/flickr/object_to_phrase.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets class DocumentVQABuilder(BaseDatasetBuilder): def _download_ann(self): pass def _download_vis(self): pass def build(self): self.build_processors() build_info = self.config.build_info datasets = dict() split = "train" dataset_cls = self.train_dataset_cls datasets[split] = dataset_cls( vis_processor=self.vis_processors[split], text_processor=self.text_processors[split], vis_root=build_info.image_path, ann_path=build_info.ann_path ) return datasets @registry.register_builder("ocrvqa") class OCRVQABuilder(DocumentVQABuilder):
@registry.register_builder("multitask_conversation") class MultitaskConversationBuilder(BaseDatasetBuilder): train_dataset_cls = MultiTaskConversationDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/multitask_conversation/default.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets @registry.register_builder("unnatural_instruction") class UnnaturalInstructionBuilder(BaseDatasetBuilder): train_dataset_cls = UnnaturalDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/nlp/unnatural_instruction.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( text_processor=self.text_processors["train"], ann_path=build_info.ann_path, ) return datasets @registry.register_builder("llava_detail") class LlavaDetailBuilder(BaseDatasetBuilder): train_dataset_cls = LlavaDetailDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/llava/detail.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets @registry.register_builder("llava_reason") class LlavaReasonBuilder(BaseDatasetBuilder): train_dataset_cls = LlavaReasonDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/llava/reason.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets @registry.register_builder("llava_conversation") class LlavaReasonBuilder(BaseDatasetBuilder): train_dataset_cls = LlavaConversationDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/llava/conversation.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. 
logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets class AllRefCOCOBuilder(BaseDatasetBuilder): def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info image_path = build_info.image_path ann_path = build_info.ann_path datasets = dict() if not os.path.exists(image_path): warnings.warn("image path {} does not exist.".format(image_path)) if not os.path.exists(ann_path): warnings.warn("ann path {} does not exist.".format(ann_path)) # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=ann_path, vis_root=image_path, dataset=build_info.dataset, splitBy=build_info.splitBy ) return datasets @registry.register_builder("refcoco") class RefCOCOBuilder(AllRefCOCOBuilder): train_dataset_cls = ReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/refcoco.yaml", } @registry.register_builder("refcocop") class RefCOCOPBuilder(AllRefCOCOBuilder): train_dataset_cls = ReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/refcocop.yaml", } @registry.register_builder("refcocog") class RefCOCOGBuilder(AllRefCOCOBuilder): train_dataset_cls = ReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/refcocog.yaml", } @registry.register_builder("invrefcoco") class RefCOCOBuilder(AllRefCOCOBuilder): train_dataset_cls = InvReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/invrefcoco.yaml", } @registry.register_builder("invrefcocop") class RefCOCOPBuilder(AllRefCOCOBuilder): train_dataset_cls = InvReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/invrefcocop.yaml", } @registry.register_builder("invrefcocog") class RefCOCOGBuilder(AllRefCOCOBuilder): train_dataset_cls = InvReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/invrefcocog.yaml", } @registry.register_builder("refvg") class RefVisualGenomeBuilder(BaseDatasetBuilder): train_dataset_cls = ReferVisualGenomeDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/vg/ref.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. 
logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info data_dir = build_info.data_dir datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], data_dir=data_dir, ) return datasets @registry.register_builder("textcaps_caption") class TextcapCaptionBuilder(BaseDatasetBuilder): train_dataset_cls = TextCapDataset DATASET_CONFIG_DICT = {"default": "configs/datasets/textcaps/caption.yaml"} def _download_ann(self): pass def _download_vis(self): pass def build(self): self.build_processors() build_info = self.config.build_info datasets = dict() split = "train" # create datasets # [NOTE] return inner_datasets (wds.DataPipeline) dataset_cls = self.train_dataset_cls datasets[split] = dataset_cls( vis_processor=self.vis_processors[split], text_processor=self.text_processors[split], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets @registry.register_builder("coco_vqa") class COCOVQABuilder(BaseDatasetBuilder): train_dataset_cls = COCOVQADataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco/defaults_vqa.yaml", } @registry.register_builder("ok_vqa") class OKVQABuilder(COCOVQABuilder): DATASET_CONFIG_DICT = { "default": "configs/datasets/okvqa/defaults.yaml", } @registry.register_builder("aok_vqa") class AOKVQABuilder(BaseDatasetBuilder): train_dataset_cls = AOKVQADataset DATASET_CONFIG_DICT = {"default": "configs/datasets/aokvqa/defaults.yaml"} @registry.register_builder("gqa") class GQABuilder(BaseDatasetBuilder): train_dataset_cls = GQADataset DATASET_CONFIG_DICT = { "default": "configs/datasets/gqa/balanced_val.yaml", } @registry.register_builder("flickr_grounded_caption") class GroundedCaptionBuilder(BaseDatasetBuilder): train_dataset_cls = GroundedDetailDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/flickr/default.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets @registry.register_builder("flickr_CaptionToPhrase") class CaptionToPhraseBuilder(BaseDatasetBuilder): train_dataset_cls = CaptionToObjectDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/flickr/caption_to_phrase.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. 
logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets @registry.register_builder("flickr_ObjectToPhrase") class CaptionToPhraseBuilder(BaseDatasetBuilder): train_dataset_cls = PhraseToObjectDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/flickr/object_to_phrase.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets class DocumentVQABuilder(BaseDatasetBuilder): def _download_ann(self): pass def _download_vis(self): pass def build(self): self.build_processors() build_info = self.config.build_info datasets = dict() split = "train" dataset_cls = self.train_dataset_cls datasets[split] = dataset_cls( vis_processor=self.vis_processors[split], text_processor=self.text_processors[split], vis_root=build_info.image_path, ann_path=build_info.ann_path ) return datasets @registry.register_builder("ocrvqa") class OCRVQABuilder(DocumentVQABuilder):
train_dataset_cls = OCRVQADataset
20
2023-12-28 05:47:18+00:00
16k
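The TinyGPT-V row is a catalogue of @registry.register_builder(...) classes that all follow the same contract: build_datasets() returns a dict whose 'train' split is constructed from config.build_info. A hedged sketch of the lookup-and-build flow; the get_builder_class accessor and the cfg=None default-YAML behavior are assumptions carried over from the LAVIS convention this file mirrors, and they require the default YAML's data paths to exist on disk:

# Hedged sketch of the registry/builder pattern in the row above.
# Assumes registry.get_builder_class exists (LAVIS convention) and that
# the builder's DATASET_CONFIG_DICT["default"] YAML points at real data.
from minigpt4.common.registry import registry

builder_cls = registry.get_builder_class("llava_detail")
builder = builder_cls()              # cfg=None -> load DATASET_CONFIG_DICT["default"]
datasets = builder.build_datasets()  # {'train': LlavaDetailDataset(...)}
print(type(datasets["train"]).__name__, len(datasets["train"]))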
jiawei-ren/dreamgaussian4d
diffusers/src/diffusers/models/attention.py
[ { "identifier": "USE_PEFT_BACKEND", "path": "diffusers/src/diffusers/utils/constants.py", "snippet": "USE_PEFT_BACKEND = _required_peft_version and _required_transformers_version" }, { "identifier": "maybe_allow_in_graph", "path": "diffusers/src/diffusers/utils/torch_utils.py", "snippet"...
from typing import Any, Dict, Optional from torch import nn from ..utils import USE_PEFT_BACKEND from ..utils.torch_utils import maybe_allow_in_graph from .activations import GEGLU, GELU, ApproximateGELU from .attention_processor import Attention from .embeddings import SinusoidalPositionalEmbedding from .lora import LoRACompatibleLinear from .normalization import AdaLayerNorm, AdaLayerNormZero import torch
12,521
self.attn1 = Attention( query_dim=time_mix_inner_dim, heads=num_attention_heads, dim_head=attention_head_dim, cross_attention_dim=None, ) # 2. Cross-Attn if cross_attention_dim is not None: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. self.norm2 = nn.LayerNorm(time_mix_inner_dim) self.attn2 = Attention( query_dim=time_mix_inner_dim, cross_attention_dim=cross_attention_dim, heads=num_attention_heads, dim_head=attention_head_dim, ) # is self-attn if encoder_hidden_states is none else: self.norm2 = None self.attn2 = None # 3. Feed-forward self.norm3 = nn.LayerNorm(time_mix_inner_dim) self.ff = FeedForward(time_mix_inner_dim, activation_fn="geglu") # let chunk size default to None self._chunk_size = None self._chunk_dim = None def set_chunk_feed_forward(self, chunk_size: Optional[int], **kwargs): # Sets chunk feed-forward self._chunk_size = chunk_size # chunk dim should be hardcoded to 1 to have better speed vs. memory trade-off self._chunk_dim = 1 def forward( self, hidden_states: torch.FloatTensor, num_frames: int, encoder_hidden_states: Optional[torch.FloatTensor] = None, ) -> torch.FloatTensor: # Notice that normalization is always applied before the real computation in the following blocks. # 0. Self-Attention batch_size = hidden_states.shape[0] batch_frames, seq_length, channels = hidden_states.shape batch_size = batch_frames // num_frames hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, seq_length, channels) hidden_states = hidden_states.permute(0, 2, 1, 3) hidden_states = hidden_states.reshape(batch_size * seq_length, num_frames, channels) residual = hidden_states hidden_states = self.norm_in(hidden_states) if self._chunk_size is not None: hidden_states = _chunked_feed_forward(self.ff, hidden_states, self._chunk_dim, self._chunk_size) else: hidden_states = self.ff_in(hidden_states) if self.is_res: hidden_states = hidden_states + residual norm_hidden_states = self.norm1(hidden_states) attn_output = self.attn1(norm_hidden_states, encoder_hidden_states=None) hidden_states = attn_output + hidden_states # 3. Cross-Attention if self.attn2 is not None: norm_hidden_states = self.norm2(hidden_states) attn_output = self.attn2(norm_hidden_states, encoder_hidden_states=encoder_hidden_states) hidden_states = attn_output + hidden_states # 4. Feed-forward norm_hidden_states = self.norm3(hidden_states) if self._chunk_size is not None: ff_output = _chunked_feed_forward(self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size) else: ff_output = self.ff(norm_hidden_states) if self.is_res: hidden_states = ff_output + hidden_states else: hidden_states = ff_output hidden_states = hidden_states[None, :].reshape(batch_size, seq_length, num_frames, channels) hidden_states = hidden_states.permute(0, 2, 1, 3) hidden_states = hidden_states.reshape(batch_size * num_frames, seq_length, channels) return hidden_states class FeedForward(nn.Module): r""" A feed-forward layer. Parameters: dim (`int`): The number of channels in the input. dim_out (`int`, *optional*): The number of channels in the output. If not given, defaults to `dim`. mult (`int`, *optional*, defaults to 4): The multiplier to use for the hidden dimension. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. 
activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward. final_dropout (`bool` *optional*, defaults to False): Apply a final dropout. """ def __init__( self, dim: int, dim_out: Optional[int] = None, mult: int = 4, dropout: float = 0.0, activation_fn: str = "geglu", final_dropout: bool = False, ): super().__init__() inner_dim = int(dim * mult) dim_out = dim_out if dim_out is not None else dim
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def _chunked_feed_forward( ff: nn.Module, hidden_states: torch.Tensor, chunk_dim: int, chunk_size: int, lora_scale: Optional[float] = None ): # "feed_forward_chunk_size" can be used to save memory if hidden_states.shape[chunk_dim] % chunk_size != 0: raise ValueError( f"`hidden_states` dimension to be chunked: {hidden_states.shape[chunk_dim]} has to be divisible by chunk size: {chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`." ) num_chunks = hidden_states.shape[chunk_dim] // chunk_size if lora_scale is None: ff_output = torch.cat( [ff(hid_slice) for hid_slice in hidden_states.chunk(num_chunks, dim=chunk_dim)], dim=chunk_dim, ) else: # TOOD(Patrick): LoRA scale can be removed once PEFT refactor is complete ff_output = torch.cat( [ff(hid_slice, scale=lora_scale) for hid_slice in hidden_states.chunk(num_chunks, dim=chunk_dim)], dim=chunk_dim, ) return ff_output @maybe_allow_in_graph class GatedSelfAttentionDense(nn.Module): r""" A gated self-attention dense layer that combines visual features and object features. Parameters: query_dim (`int`): The number of channels in the query. context_dim (`int`): The number of channels in the context. n_heads (`int`): The number of heads to use for attention. d_head (`int`): The number of channels in each head. """ def __init__(self, query_dim: int, context_dim: int, n_heads: int, d_head: int): super().__init__() # we need a linear projection since we need cat visual feature and obj feature self.linear = nn.Linear(context_dim, query_dim) self.attn = Attention(query_dim=query_dim, heads=n_heads, dim_head=d_head) self.ff = FeedForward(query_dim, activation_fn="geglu") self.norm1 = nn.LayerNorm(query_dim) self.norm2 = nn.LayerNorm(query_dim) self.register_parameter("alpha_attn", nn.Parameter(torch.tensor(0.0))) self.register_parameter("alpha_dense", nn.Parameter(torch.tensor(0.0))) self.enabled = True def forward(self, x: torch.Tensor, objs: torch.Tensor) -> torch.Tensor: if not self.enabled: return x n_visual = x.shape[1] objs = self.linear(objs) x = x + self.alpha_attn.tanh() * self.attn(self.norm1(torch.cat([x, objs], dim=1)))[:, :n_visual, :] x = x + self.alpha_dense.tanh() * self.ff(self.norm2(x)) return x @maybe_allow_in_graph class BasicTransformerBlock(nn.Module): r""" A basic Transformer block. Parameters: dim (`int`): The number of channels in the input and output. num_attention_heads (`int`): The number of heads to use for multi-head attention. attention_head_dim (`int`): The number of channels in each head. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention. activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward. num_embeds_ada_norm (: obj: `int`, *optional*): The number of diffusion steps used during training. 
See `Transformer2DModel`. attention_bias (: obj: `bool`, *optional*, defaults to `False`): Configure if the attentions should contain a bias parameter. only_cross_attention (`bool`, *optional*): Whether to use only cross-attention layers. In this case two cross attention layers are used. double_self_attention (`bool`, *optional*): Whether to use two self-attention layers. In this case no cross attention layers are used. upcast_attention (`bool`, *optional*): Whether to upcast the attention computation to float32. This is useful for mixed precision training. norm_elementwise_affine (`bool`, *optional*, defaults to `True`): Whether to use learnable elementwise affine parameters for normalization. norm_type (`str`, *optional*, defaults to `"layer_norm"`): The normalization layer to use. Can be `"layer_norm"`, `"ada_norm"` or `"ada_norm_zero"`. final_dropout (`bool` *optional*, defaults to False): Whether to apply a final dropout after the last feed-forward layer. attention_type (`str`, *optional*, defaults to `"default"`): The type of attention to use. Can be `"default"` or `"gated"` or `"gated-text-image"`. positional_embeddings (`str`, *optional*, defaults to `None`): The type of positional embeddings to apply to. num_positional_embeddings (`int`, *optional*, defaults to `None`): The maximum number of positional embeddings to apply. """ def __init__( self, dim: int, num_attention_heads: int, attention_head_dim: int, dropout=0.0, cross_attention_dim: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None, attention_bias: bool = False, only_cross_attention: bool = False, double_self_attention: bool = False, upcast_attention: bool = False, norm_elementwise_affine: bool = True, norm_type: str = "layer_norm", # 'layer_norm', 'ada_norm', 'ada_norm_zero', 'ada_norm_single' norm_eps: float = 1e-5, final_dropout: bool = False, attention_type: str = "default", positional_embeddings: Optional[str] = None, num_positional_embeddings: Optional[int] = None, ): super().__init__() self.only_cross_attention = only_cross_attention self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero" self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm" self.use_ada_layer_norm_single = norm_type == "ada_norm_single" self.use_layer_norm = norm_type == "layer_norm" if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to" f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}." ) if positional_embeddings and (num_positional_embeddings is None): raise ValueError( "If `positional_embedding` type is defined, `num_positition_embeddings` must also be defined." ) if positional_embeddings == "sinusoidal": self.pos_embed = SinusoidalPositionalEmbedding(dim, max_seq_length=num_positional_embeddings) else: self.pos_embed = None # Define 3 blocks. Each block has its own normalization layer. # 1. 
Self-Attn if self.use_ada_layer_norm: self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm) elif self.use_ada_layer_norm_zero: self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm) else: self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps) self.attn1 = Attention( query_dim=dim, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, cross_attention_dim=cross_attention_dim if only_cross_attention else None, upcast_attention=upcast_attention, ) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. self.norm2 = ( AdaLayerNorm(dim, num_embeds_ada_norm) if self.use_ada_layer_norm else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps) ) self.attn2 = Attention( query_dim=dim, cross_attention_dim=cross_attention_dim if not double_self_attention else None, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, upcast_attention=upcast_attention, ) # is self-attn if encoder_hidden_states is none else: self.norm2 = None self.attn2 = None # 3. Feed-forward if not self.use_ada_layer_norm_single: self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps) self.ff = FeedForward( dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout, ) # 4. Fuser if attention_type == "gated" or attention_type == "gated-text-image": self.fuser = GatedSelfAttentionDense(dim, cross_attention_dim, num_attention_heads, attention_head_dim) # 5. Scale-shift for PixArt-Alpha. if self.use_ada_layer_norm_single: self.scale_shift_table = nn.Parameter(torch.randn(6, dim) / dim**0.5) # let chunk size default to None self._chunk_size = None self._chunk_dim = 0 def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int = 0): # Sets chunk feed-forward self._chunk_size = chunk_size self._chunk_dim = dim def forward( self, hidden_states: torch.FloatTensor, attention_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, timestep: Optional[torch.LongTensor] = None, cross_attention_kwargs: Dict[str, Any] = None, class_labels: Optional[torch.LongTensor] = None, ) -> torch.FloatTensor: # Notice that normalization is always applied before the real computation in the following blocks. # 0. Self-Attention batch_size = hidden_states.shape[0] if self.use_ada_layer_norm: norm_hidden_states = self.norm1(hidden_states, timestep) elif self.use_ada_layer_norm_zero: norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1( hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype ) elif self.use_layer_norm: norm_hidden_states = self.norm1(hidden_states) elif self.use_ada_layer_norm_single: shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = ( self.scale_shift_table[None] + timestep.reshape(batch_size, 6, -1) ).chunk(6, dim=1) norm_hidden_states = self.norm1(hidden_states) norm_hidden_states = norm_hidden_states * (1 + scale_msa) + shift_msa norm_hidden_states = norm_hidden_states.squeeze(1) else: raise ValueError("Incorrect norm used") if self.pos_embed is not None: norm_hidden_states = self.pos_embed(norm_hidden_states) # 1. 
Retrieve lora scale. lora_scale = cross_attention_kwargs.get("scale", 1.0) if cross_attention_kwargs is not None else 1.0 # 2. Prepare GLIGEN inputs cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {} gligen_kwargs = cross_attention_kwargs.pop("gligen", None) attn_output = self.attn1( norm_hidden_states, encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, attention_mask=attention_mask, **cross_attention_kwargs, ) if self.use_ada_layer_norm_zero: attn_output = gate_msa.unsqueeze(1) * attn_output elif self.use_ada_layer_norm_single: attn_output = gate_msa * attn_output hidden_states = attn_output + hidden_states if hidden_states.ndim == 4: hidden_states = hidden_states.squeeze(1) # 2.5 GLIGEN Control if gligen_kwargs is not None: hidden_states = self.fuser(hidden_states, gligen_kwargs["objs"]) # 3. Cross-Attention if self.attn2 is not None: if self.use_ada_layer_norm: norm_hidden_states = self.norm2(hidden_states, timestep) elif self.use_ada_layer_norm_zero or self.use_layer_norm: norm_hidden_states = self.norm2(hidden_states) elif self.use_ada_layer_norm_single: # For PixArt norm2 isn't applied here: # https://github.com/PixArt-alpha/PixArt-alpha/blob/0f55e922376d8b797edd44d25d0e7464b260dcab/diffusion/model/nets/PixArtMS.py#L70C1-L76C103 norm_hidden_states = hidden_states else: raise ValueError("Incorrect norm") if self.pos_embed is not None and self.use_ada_layer_norm_single is False: norm_hidden_states = self.pos_embed(norm_hidden_states) attn_output = self.attn2( norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=encoder_attention_mask, **cross_attention_kwargs, ) hidden_states = attn_output + hidden_states # 4. Feed-forward if not self.use_ada_layer_norm_single: norm_hidden_states = self.norm3(hidden_states) if self.use_ada_layer_norm_zero: norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] if self.use_ada_layer_norm_single: norm_hidden_states = self.norm2(hidden_states) norm_hidden_states = norm_hidden_states * (1 + scale_mlp) + shift_mlp if self._chunk_size is not None: # "feed_forward_chunk_size" can be used to save memory ff_output = _chunked_feed_forward( self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size, lora_scale=lora_scale ) else: ff_output = self.ff(norm_hidden_states, scale=lora_scale) if self.use_ada_layer_norm_zero: ff_output = gate_mlp.unsqueeze(1) * ff_output elif self.use_ada_layer_norm_single: ff_output = gate_mlp * ff_output hidden_states = ff_output + hidden_states if hidden_states.ndim == 4: hidden_states = hidden_states.squeeze(1) return hidden_states @maybe_allow_in_graph class TemporalBasicTransformerBlock(nn.Module): r""" A basic Transformer block for video like data. Parameters: dim (`int`): The number of channels in the input and output. time_mix_inner_dim (`int`): The number of channels for temporal attention. num_attention_heads (`int`): The number of heads to use for multi-head attention. attention_head_dim (`int`): The number of channels in each head. cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention. """ def __init__( self, dim: int, time_mix_inner_dim: int, num_attention_heads: int, attention_head_dim: int, cross_attention_dim: Optional[int] = None, ): super().__init__() self.is_res = dim == time_mix_inner_dim self.norm_in = nn.LayerNorm(dim) # Define 3 blocks. Each block has its own normalization layer. # 1. 
Self-Attn self.ff_in = FeedForward( dim, dim_out=time_mix_inner_dim, activation_fn="geglu", ) self.norm1 = nn.LayerNorm(time_mix_inner_dim) self.attn1 = Attention( query_dim=time_mix_inner_dim, heads=num_attention_heads, dim_head=attention_head_dim, cross_attention_dim=None, ) # 2. Cross-Attn if cross_attention_dim is not None: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. self.norm2 = nn.LayerNorm(time_mix_inner_dim) self.attn2 = Attention( query_dim=time_mix_inner_dim, cross_attention_dim=cross_attention_dim, heads=num_attention_heads, dim_head=attention_head_dim, ) # is self-attn if encoder_hidden_states is none else: self.norm2 = None self.attn2 = None # 3. Feed-forward self.norm3 = nn.LayerNorm(time_mix_inner_dim) self.ff = FeedForward(time_mix_inner_dim, activation_fn="geglu") # let chunk size default to None self._chunk_size = None self._chunk_dim = None def set_chunk_feed_forward(self, chunk_size: Optional[int], **kwargs): # Sets chunk feed-forward self._chunk_size = chunk_size # chunk dim should be hardcoded to 1 to have better speed vs. memory trade-off self._chunk_dim = 1 def forward( self, hidden_states: torch.FloatTensor, num_frames: int, encoder_hidden_states: Optional[torch.FloatTensor] = None, ) -> torch.FloatTensor: # Notice that normalization is always applied before the real computation in the following blocks. # 0. Self-Attention batch_frames, seq_length, channels = hidden_states.shape batch_size = batch_frames // num_frames hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, seq_length, channels) hidden_states = hidden_states.permute(0, 2, 1, 3) hidden_states = hidden_states.reshape(batch_size * seq_length, num_frames, channels) residual = hidden_states hidden_states = self.norm_in(hidden_states) if self._chunk_size is not None: hidden_states = _chunked_feed_forward(self.ff_in, hidden_states, self._chunk_dim, self._chunk_size) else: hidden_states = self.ff_in(hidden_states) if self.is_res: hidden_states = hidden_states + residual norm_hidden_states = self.norm1(hidden_states) attn_output = self.attn1(norm_hidden_states, encoder_hidden_states=None) hidden_states = attn_output + hidden_states # 3. Cross-Attention if self.attn2 is not None: norm_hidden_states = self.norm2(hidden_states) attn_output = self.attn2(norm_hidden_states, encoder_hidden_states=encoder_hidden_states) hidden_states = attn_output + hidden_states # 4. Feed-forward norm_hidden_states = self.norm3(hidden_states) if self._chunk_size is not None: ff_output = _chunked_feed_forward(self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size) else: ff_output = self.ff(norm_hidden_states) if self.is_res: hidden_states = ff_output + hidden_states else: hidden_states = ff_output hidden_states = hidden_states[None, :].reshape(batch_size, seq_length, num_frames, channels) hidden_states = hidden_states.permute(0, 2, 1, 3) hidden_states = hidden_states.reshape(batch_size * num_frames, seq_length, channels) return hidden_states class FeedForward(nn.Module): r""" A feed-forward layer. Parameters: dim (`int`): The number of channels in the input. dim_out (`int`, *optional*): The number of channels in the output. If not given, defaults to `dim`. mult (`int`, *optional*, defaults to 4): The multiplier to use for the hidden dimension.
dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward. final_dropout (`bool` *optional*, defaults to False): Apply a final dropout. """ def __init__( self, dim: int, dim_out: Optional[int] = None, mult: int = 4, dropout: float = 0.0, activation_fn: str = "geglu", final_dropout: bool = False, ): super().__init__() inner_dim = int(dim * mult) dim_out = dim_out if dim_out is not None else dim
linear_cls = LoRACompatibleLinear if not USE_PEFT_BACKEND else nn.Linear
0
2023-12-28 08:17:40+00:00
16k
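The record above centers on diffusers' `_chunked_feed_forward` helper. As a minimal, self-contained sketch (PyTorch only; all names here are illustrative, not part of the diffusers API), the memory-saving trick is: split the chunked dimension, run the same pointwise MLP per chunk, and concatenate, which yields exactly the unchunked result.

import torch
import torch.nn as nn

def chunked_ff(ff: nn.Module, x: torch.Tensor, chunk_dim: int, chunk_size: int) -> torch.Tensor:
    if x.shape[chunk_dim] % chunk_size != 0:
        raise ValueError("chunk_size must divide the size of the chunked dimension")
    num_chunks = x.shape[chunk_dim] // chunk_size
    # Peak activation memory now scales with chunk_size rather than the full length.
    return torch.cat([ff(part) for part in x.chunk(num_chunks, dim=chunk_dim)], dim=chunk_dim)

torch.manual_seed(0)
ff = nn.Sequential(nn.Linear(16, 64), nn.GELU(), nn.Linear(64, 16))
x = torch.randn(2, 8, 16)  # (batch, sequence, channels)
assert torch.allclose(chunked_ff(ff, x, chunk_dim=1, chunk_size=2), ff(x), atol=1e-6)

Because the feed-forward acts only on the channel dimension, chunking along the sequence dimension changes peak activation memory but not the output.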
FoundationVision/UniRef
detectron2/data/datasets/coco.py
[ { "identifier": "Boxes", "path": "detectron2/structures/boxes.py", "snippet": "class Boxes:\n \"\"\"\n This structure stores a list of boxes as a Nx4 torch.Tensor.\n It supports some common methods about boxes\n (`area`, `clip`, `nonempty`, etc),\n and also behaves like a Tensor\n (sup...
import contextlib import datetime import io import json import logging import numpy as np import os import shutil import pycocotools.mask as mask_util import detectron2.data.datasets # noqa # add pre-defined metadata import sys from fvcore.common.timer import Timer from iopath.common.file_io import file_lock from PIL import Image from detectron2.structures import Boxes, BoxMode, PolygonMasks, RotatedBoxes from detectron2.utils.file_io import PathManager from .. import DatasetCatalog, MetadataCatalog from pycocotools.coco import COCO from detectron2.utils.logger import setup_logger from detectron2.utils.visualizer import Visualizer
14,206
gt_files = sorted( (os.path.join(gt_root, f) for f in PathManager.ls(gt_root) if f.endswith(gt_ext)), key=lambda file_path: file2id(gt_root, file_path), ) assert len(gt_files) > 0, "No annotations found in {}.".format(gt_root) # Use the intersection, so that val2017_100 annotations can run smoothly with val2017 images if len(input_files) != len(gt_files): logger.warning( "Directories {} and {} have {} and {} files, respectively.".format( image_root, gt_root, len(input_files), len(gt_files) ) ) input_basenames = [os.path.basename(f)[: -len(image_ext)] for f in input_files] gt_basenames = [os.path.basename(f)[: -len(gt_ext)] for f in gt_files] intersect = list(set(input_basenames) & set(gt_basenames)) # sort, otherwise each worker may obtain a list[dict] in different order intersect = sorted(intersect) logger.warning("Will use their intersection of {} files.".format(len(intersect))) input_files = [os.path.join(image_root, f + image_ext) for f in intersect] gt_files = [os.path.join(gt_root, f + gt_ext) for f in intersect] logger.info( "Loaded {} images with semantic segmentation from {}".format(len(input_files), image_root) ) dataset_dicts = [] for (img_path, gt_path) in zip(input_files, gt_files): record = {} record["file_name"] = img_path record["sem_seg_file_name"] = gt_path dataset_dicts.append(record) return dataset_dicts def convert_to_coco_dict(dataset_name): """ Convert an instance detection/segmentation or keypoint detection dataset in detectron2's standard format into COCO json format. Generic dataset description can be found here: https://detectron2.readthedocs.io/tutorials/datasets.html#register-a-dataset COCO data format description can be found here: http://cocodataset.org/#format-data Args: dataset_name (str): name of the source dataset Must be registered in DatasetCatalog and in detectron2's standard format. Must have corresponding metadata "thing_classes" Returns: coco_dict: serializable dict in COCO json format """ dataset_dicts = DatasetCatalog.get(dataset_name) metadata = MetadataCatalog.get(dataset_name) # unmap the category mapping ids for COCO if hasattr(metadata, "thing_dataset_id_to_contiguous_id"): reverse_id_mapping = {v: k for k, v in metadata.thing_dataset_id_to_contiguous_id.items()} reverse_id_mapper = lambda contiguous_id: reverse_id_mapping[contiguous_id] # noqa else: reverse_id_mapper = lambda contiguous_id: contiguous_id # noqa categories = [ {"id": reverse_id_mapper(id), "name": name} for id, name in enumerate(metadata.thing_classes) ] logger.info("Converting dataset dicts into COCO format") coco_images = [] coco_annotations = [] for image_id, image_dict in enumerate(dataset_dicts): coco_image = { "id": image_dict.get("image_id", image_id), "width": int(image_dict["width"]), "height": int(image_dict["height"]), "file_name": str(image_dict["file_name"]), } coco_images.append(coco_image) anns_per_image = image_dict.get("annotations", []) for annotation in anns_per_image: # create a new dict with only COCO fields coco_annotation = {} # COCO requirement: XYWH box format for axis-align and XYWHA for rotated bbox = annotation["bbox"] if isinstance(bbox, np.ndarray): if bbox.ndim != 1: raise ValueError(f"bbox has to be 1-dimensional. Got shape={bbox.shape}.") bbox = bbox.tolist() if len(bbox) not in [4, 5]: raise ValueError(f"bbox has to have length 4 or 5.
Got {bbox}.") from_bbox_mode = annotation["bbox_mode"] to_bbox_mode = BoxMode.XYWH_ABS if len(bbox) == 4 else BoxMode.XYWHA_ABS bbox = BoxMode.convert(bbox, from_bbox_mode, to_bbox_mode) # COCO requirement: instance area if "segmentation" in annotation: # Computing areas for instances by counting the pixels segmentation = annotation["segmentation"] # TODO: check segmentation type: RLE, BinaryMask or Polygon if isinstance(segmentation, list): polygons = PolygonMasks([segmentation]) area = polygons.area()[0].item() elif isinstance(segmentation, dict): # RLE area = mask_util.area(segmentation).item() else: raise TypeError(f"Unknown segmentation type {type(segmentation)}!") else: # Computing areas using bounding boxes if to_bbox_mode == BoxMode.XYWH_ABS: bbox_xy = BoxMode.convert(bbox, to_bbox_mode, BoxMode.XYXY_ABS) area = Boxes([bbox_xy]).area()[0].item() else:
# Copyright (c) Facebook, Inc. and its affiliates. """ This file contains functions to parse COCO-format annotations into dicts in "Detectron2 format". """ logger = logging.getLogger(__name__) __all__ = ["load_coco_json", "load_sem_seg", "convert_to_coco_json", "register_coco_instances"] def load_coco_json(json_file, image_root, dataset_name=None, extra_annotation_keys=None, dataset_name_in_dict="coco"): """ Load a json file with COCO's instances annotation format. Currently supports instance detection, instance segmentation, and person keypoints annotations. Args: json_file (str): full path to the json file in COCO instances annotation format. image_root (str or path-like): the directory where the images in this json file exists. dataset_name (str or None): the name of the dataset (e.g., coco_2017_train). When provided, this function will also do the following: * Put "thing_classes" into the metadata associated with this dataset. * Map the category ids into a contiguous range (needed by standard dataset format), and add "thing_dataset_id_to_contiguous_id" to the metadata associated with this dataset. This option should usually be provided, unless users need to load the original json content and apply more processing manually. extra_annotation_keys (list[str]): list of per-annotation keys that should also be loaded into the dataset dict (besides "iscrowd", "bbox", "keypoints", "category_id", "segmentation"). The values for these keys will be returned as-is. For example, the densepose annotations are loaded in this way. Returns: list[dict]: a list of dicts in Detectron2 standard dataset dicts format (See `Using Custom Datasets </tutorials/datasets.html>`_ ) when `dataset_name` is not None. If `dataset_name` is None, the returned `category_ids` may be incontiguous and may not conform to the Detectron2 standard format. Notes: 1. This function does not read the image files. The results do not have the "image" field. """ timer = Timer() json_file = PathManager.get_local_path(json_file) with contextlib.redirect_stdout(io.StringIO()): coco_api = COCO(json_file) if timer.seconds() > 1: logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds())) id_map = None if dataset_name is not None: meta = MetadataCatalog.get(dataset_name) cat_ids = sorted(coco_api.getCatIds()) cats = coco_api.loadCats(cat_ids) # The categories in a custom json file may not be sorted. thing_classes = [c["name"] for c in sorted(cats, key=lambda x: x["id"])] meta.thing_classes = thing_classes # In COCO, certain category ids are artificially removed, # and by convention they are always ignored. # We deal with COCO's id issue and translate # the category ids to contiguous ids in [0, 80). # It works by looking at the "categories" field in the json, therefore # if users' own json also have incontiguous ids, we'll # apply this mapping as well but print a warning. if not (min(cat_ids) == 1 and max(cat_ids) == len(cat_ids)): if "coco" not in dataset_name: logger.warning( """ Category ids in annotations are not in [1, #categories]! We'll apply a mapping for you. 
""" ) id_map = {v: i for i, v in enumerate(cat_ids)} meta.thing_dataset_id_to_contiguous_id = id_map # sort indices for reproducible results img_ids = sorted(coco_api.imgs.keys()) # imgs is a list of dicts, each looks something like: # {'license': 4, # 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg', # 'file_name': 'COCO_val2014_000000001268.jpg', # 'height': 427, # 'width': 640, # 'date_captured': '2013-11-17 05:57:24', # 'id': 1268} imgs = coco_api.loadImgs(img_ids) # anns is a list[list[dict]], where each dict is an annotation # record for an object. The inner list enumerates the objects in an image # and the outer list enumerates over images. Example of anns[0]: # [{'segmentation': [[192.81, # 247.09, # ... # 219.03, # 249.06]], # 'area': 1035.749, # 'iscrowd': 0, # 'image_id': 1268, # 'bbox': [192.81, 224.8, 74.73, 33.43], # 'category_id': 16, # 'id': 42986}, # ...] anns = [coco_api.imgToAnns[img_id] for img_id in img_ids] total_num_valid_anns = sum([len(x) for x in anns]) total_num_anns = len(coco_api.anns) if total_num_valid_anns < total_num_anns: logger.warning( f"{json_file} contains {total_num_anns} annotations, but only " f"{total_num_valid_anns} of them match to images in the file." ) if "minival" not in json_file: # The popular valminusminival & minival annotations for COCO2014 contain this bug. # However the ratio of buggy annotations there is tiny and does not affect accuracy. # Therefore we explicitly white-list them. ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image] assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique!".format( json_file ) imgs_anns = list(zip(imgs, anns)) logger.info("Loaded {} images in COCO format from {}".format(len(imgs_anns), json_file)) dataset_dicts = [] ann_keys = ["iscrowd", "bbox", "keypoints", "category_id"] + (extra_annotation_keys or []) num_instances_without_valid_segmentation = 0 for (img_dict, anno_dict_list) in imgs_anns: record = {} record["file_name"] = os.path.join(image_root, img_dict["file_name"]) record["height"] = img_dict["height"] record["width"] = img_dict["width"] image_id = record["image_id"] = img_dict["id"] objs = [] for anno in anno_dict_list: # Check that the image_id in this annotation is the same as # the image_id we're looking at. # This fails only when the data parsing logic or the annotation file is buggy. # The original COCO valminusminival2014 & minival2014 annotation files # actually contains bugs that, together with certain ways of using COCO API, # can trigger this assertion. assert anno["image_id"] == image_id assert anno.get("ignore", 0) == 0, '"ignore" in COCO json file is not supported.' obj = {key: anno[key] for key in ann_keys if key in anno} if "bbox" in obj and len(obj["bbox"]) == 0: raise ValueError( f"One annotation of image {image_id} contains empty 'bbox' value! " "This json does not have valid COCO format." 
) segm = anno.get("segmentation", None) if segm: # either list[list[float]] or dict(RLE) if isinstance(segm, dict): if isinstance(segm["counts"], list): # convert to compressed RLE segm = mask_util.frPyObjects(segm, *segm["size"]) else: # filter out invalid polygons (< 3 points) segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6] if len(segm) == 0: num_instances_without_valid_segmentation += 1 continue # ignore this instance obj["segmentation"] = segm keypts = anno.get("keypoints", None) if keypts: # list[int] for idx, v in enumerate(keypts): if idx % 3 != 2: # COCO's segmentation coordinates are floating points in [0, H or W], # but keypoint coordinates are integers in [0, H-1 or W-1] # Therefore we assume the coordinates are "pixel indices" and # add 0.5 to convert to floating point coordinates. keypts[idx] = v + 0.5 obj["keypoints"] = keypts obj["bbox_mode"] = BoxMode.XYWH_ABS if id_map: annotation_category_id = obj["category_id"] try: obj["category_id"] = id_map[annotation_category_id] except KeyError as e: raise KeyError( f"Encountered category_id={annotation_category_id} " "but this id does not exist in 'categories' of the json file." ) from e objs.append(obj) record["annotations"] = objs record["task"] = "detection" record["dataset_name"] = dataset_name_in_dict dataset_dicts.append(record) if num_instances_without_valid_segmentation > 0: logger.warning( "Filtered out {} instances without valid segmentation. ".format( num_instances_without_valid_segmentation ) + "There might be issues in your dataset generation process. Please " "check https://detectron2.readthedocs.io/en/latest/tutorials/datasets.html carefully" ) return dataset_dicts def load_sem_seg(gt_root, image_root, gt_ext="png", image_ext="jpg"): """ Load semantic segmentation datasets. All files under "gt_root" with "gt_ext" extension are treated as ground truth annotations and all files under "image_root" with "image_ext" extension as input images. Ground truth and input images are matched using file paths relative to "gt_root" and "image_root" respectively without taking into account file extensions. This works for COCO as well as some other datasets. Args: gt_root (str): full path to ground truth semantic segmentation files. Semantic segmentation annotations are stored as images with integer values in pixels that represent corresponding semantic labels. image_root (str): the directory where the input images are. gt_ext (str): file extension for ground truth annotations. image_ext (str): file extension for input images. Returns: list[dict]: a list of dicts in detectron2 standard format without instance-level annotation. Notes: 1. This function does not read the image and ground truth files. The results do not have the "image" and "sem_seg" fields. """ # We match input images with ground truth based on their relative filepaths (without file # extensions) starting from 'image_root' and 'gt_root' respectively. 
def file2id(folder_path, file_path): # extract relative path starting from `folder_path` image_id = os.path.normpath(os.path.relpath(file_path, start=folder_path)) # remove file extension image_id = os.path.splitext(image_id)[0] return image_id input_files = sorted( (os.path.join(image_root, f) for f in PathManager.ls(image_root) if f.endswith(image_ext)), key=lambda file_path: file2id(image_root, file_path), ) gt_files = sorted( (os.path.join(gt_root, f) for f in PathManager.ls(gt_root) if f.endswith(gt_ext)), key=lambda file_path: file2id(gt_root, file_path), ) assert len(gt_files) > 0, "No annotations found in {}.".format(gt_root) # Use the intersection, so that val2017_100 annotations can run smoothly with val2017 images if len(input_files) != len(gt_files): logger.warning( "Directories {} and {} have {} and {} files, respectively.".format( image_root, gt_root, len(input_files), len(gt_files) ) ) input_basenames = [os.path.basename(f)[: -len(image_ext)] for f in input_files] gt_basenames = [os.path.basename(f)[: -len(gt_ext)] for f in gt_files] intersect = list(set(input_basenames) & set(gt_basenames)) # sort, otherwise each worker may obtain a list[dict] in different order intersect = sorted(intersect) logger.warning("Will use their intersection of {} files.".format(len(intersect))) input_files = [os.path.join(image_root, f + image_ext) for f in intersect] gt_files = [os.path.join(gt_root, f + gt_ext) for f in intersect] logger.info( "Loaded {} images with semantic segmentation from {}".format(len(input_files), image_root) ) dataset_dicts = [] for (img_path, gt_path) in zip(input_files, gt_files): record = {} record["file_name"] = img_path record["sem_seg_file_name"] = gt_path dataset_dicts.append(record) return dataset_dicts def convert_to_coco_dict(dataset_name): """ Convert an instance detection/segmentation or keypoint detection dataset in detectron2's standard format into COCO json format. Generic dataset description can be found here: https://detectron2.readthedocs.io/tutorials/datasets.html#register-a-dataset COCO data format description can be found here: http://cocodataset.org/#format-data Args: dataset_name (str): name of the source dataset Must be registered in DatasetCatalog and in detectron2's standard format.
Must have corresponding metadata "thing_classes" Returns: coco_dict: serializable dict in COCO json format """ dataset_dicts = DatasetCatalog.get(dataset_name) metadata = MetadataCatalog.get(dataset_name) # unmap the category mapping ids for COCO if hasattr(metadata, "thing_dataset_id_to_contiguous_id"): reverse_id_mapping = {v: k for k, v in metadata.thing_dataset_id_to_contiguous_id.items()} reverse_id_mapper = lambda contiguous_id: reverse_id_mapping[contiguous_id] # noqa else: reverse_id_mapper = lambda contiguous_id: contiguous_id # noqa categories = [ {"id": reverse_id_mapper(id), "name": name} for id, name in enumerate(metadata.thing_classes) ] logger.info("Converting dataset dicts into COCO format") coco_images = [] coco_annotations = [] for image_id, image_dict in enumerate(dataset_dicts): coco_image = { "id": image_dict.get("image_id", image_id), "width": int(image_dict["width"]), "height": int(image_dict["height"]), "file_name": str(image_dict["file_name"]), } coco_images.append(coco_image) anns_per_image = image_dict.get("annotations", []) for annotation in anns_per_image: # create a new dict with only COCO fields coco_annotation = {} # COCO requirement: XYWH box format for axis-align and XYWHA for rotated bbox = annotation["bbox"] if isinstance(bbox, np.ndarray): if bbox.ndim != 1: raise ValueError(f"bbox has to be 1-dimensional. Got shape={bbox.shape}.") bbox = bbox.tolist() if len(bbox) not in [4, 5]: raise ValueError(f"bbox has to have length 4 or 5. Got {bbox}.") from_bbox_mode = annotation["bbox_mode"] to_bbox_mode = BoxMode.XYWH_ABS if len(bbox) == 4 else BoxMode.XYWHA_ABS bbox = BoxMode.convert(bbox, from_bbox_mode, to_bbox_mode) # COCO requirement: instance area if "segmentation" in annotation: # Computing areas for instances by counting the pixels segmentation = annotation["segmentation"] # TODO: check segmentation type: RLE, BinaryMask or Polygon if isinstance(segmentation, list): polygons = PolygonMasks([segmentation]) area = polygons.area()[0].item() elif isinstance(segmentation, dict): # RLE area = mask_util.area(segmentation).item() else: raise TypeError(f"Unknown segmentation type {type(segmentation)}!") else: # Computing areas using bounding boxes if to_bbox_mode == BoxMode.XYWH_ABS: bbox_xy = BoxMode.convert(bbox, to_bbox_mode, BoxMode.XYXY_ABS) area = Boxes([bbox_xy]).area()[0].item() else:
area = RotatedBoxes([bbox]).area()[0].item()
3
2023-12-22 13:31:33+00:00
16k
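The `convert_to_coco_dict` function in the record above falls back to box area when an annotation has no segmentation. Below is a dependency-free sketch of that XYWH to XYXY conversion and area computation; the helpers are illustrative stand-ins for `BoxMode.convert` and `Boxes.area`, not the detectron2 implementation.

def xywh_to_xyxy(box):
    # COCO stores axis-aligned boxes as [x, y, width, height]
    x, y, w, h = box
    return [x, y, x + w, y + h]

def box_area(box_xyxy):
    x0, y0, x1, y1 = box_xyxy
    return max(x1 - x0, 0.0) * max(y1 - y0, 0.0)

bbox = [192.81, 224.8, 74.73, 33.43]   # XYWH, taken from the sample annotation above
print(box_area(xywh_to_xyxy(bbox)))    # ~2498.22, the COCO "area" fallback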
Con6924/SPM
evaluate_task.py
[ { "identifier": "config", "path": "src/configs/config.py", "snippet": "PRECISION_TYPES = Literal[\"fp32\", \"fp16\", \"bf16\", \"float32\", \"float16\", \"bfloat16\"]\nclass PretrainedModelConfig(BaseModel):\nclass NetworkConfig(BaseModel):\nclass TrainConfig(BaseModel): \nclass SaveConfig(BaseModel)...
import argparse import gc import warnings import torch from pathlib import Path from typing import Literal from torch.utils.data import DataLoader from accelerate import PartialState, Accelerator from src.configs import config from src.configs.config import RootConfig from src.configs.generation_config import GenerationConfig from src.engine import train_util from src.evaluation import * from src.models import model_util from src.models.spm import SPMLayer, SPMNetwork from src.models.merge_spm import load_state_dict from src.misc.sld_pipeline import SLDPipeline
11,901
if args.task == "general": dataset_class = ClipTemplateDataset elif args.task == "artwork": dataset_class = ArtworkDataset elif args.task == "i2p": dataset_class = I2PDataset elif args.task == "coco": dataset_class = Coco30kGenerationDataset else: raise ValueError(f"Unknown task: {args.task}") dataset = dataset_class(**task_args, base_cfg=cfg) dataloader = DataLoader(dataset, batch_size=num_processes, num_workers=0, shuffle=False) return dataloader def get_evaluator(args): evaluator_class = None if args.task == "general": evaluator_class = ClipEvaluator elif args.task == "artwork": evaluator_class = ArtworkEvaluator elif args.task == "i2p": evaluator_class = I2PEvaluator elif args.task == "coco": evaluator_class = CocoEvaluator else: raise ValueError(f"Unknown task: {args.task}") evaluator = evaluator_class( save_folder=args.img_save_path, output_path=args.save_path ) return evaluator def calculate_matching_score( prompt_tokens, prompt_embeds, erased_prompt_tokens, erased_prompt_embeds, matching_metric: MATCHING_METRICS, special_token_ids: set[int], weight_dtype: torch.dtype = torch.float32, ): scores = [] if "allone" in matching_metric: scores.append(torch.ones(prompt_embeds.shape[0]).to("cpu", dtype=weight_dtype)) if "clipcos" in matching_metric: clipcos = torch.cosine_similarity( prompt_embeds.flatten(1, 2), erased_prompt_embeds.flatten(1, 2), dim=-1 ).cpu() scores.append(clipcos) if "tokenuni" in matching_metric: prompt_set = set(prompt_tokens[0].tolist()) - special_token_ids tokenuni = [] for ep in erased_prompt_tokens: ep_set = set(ep.tolist()) - special_token_ids tokenuni.append(len(prompt_set.intersection(ep_set)) / len(ep_set)) scores.append(torch.tensor(tokenuni).to("cpu", dtype=weight_dtype)) return torch.max(torch.stack(scores), dim=0)[0] @torch.no_grad() def infer_with_spm( dataloader: DataLoader, spm_paths: list[str], matching_metric: MATCHING_METRICS, facilitate_factor: float = 1.0, assigned_multipliers: list[float] = None, finetuned_model_path: str = None, sld_target_concept: str = None, base_model: str = "CompVis/stable-diffusion-v1-4", v2: bool = False, precision: str = "fp32", ): spm_model_paths = [ lp / f"{lp.name}_last.safetensors" if lp.is_dir() else lp for lp in spm_paths ] weight_dtype = config.parse_precision(precision) if finetuned_model_path is not None and Path(finetuned_model_path).is_dir(): # folder path for the diffuser model base_model = finetuned_model_path print(f"Using models from {base_model}") # load the pretrained SD tokenizer, text_encoder, unet, pipe = model_util.load_checkpoint_model( base_model, v2=v2, weight_dtype=weight_dtype, device=distributed_state.device, ) special_token_ids = set( tokenizer.convert_tokens_to_ids(tokenizer.special_tokens_map.values()) ) text_encoder.to(distributed_state.device, dtype=weight_dtype) text_encoder.eval() unet.to(distributed_state.device, dtype=weight_dtype) unet.enable_xformers_memory_efficient_attention() unet.requires_grad_(False) unet.eval() if len(spm_model_paths) > 0: # load the SPM models spms, metadatas = zip( *[ load_state_dict(spm_model_path, weight_dtype) for spm_model_path in spm_model_paths ] ) # check if SPMs are compatible assert all([metadata["rank"] == metadatas[0]["rank"] for metadata in metadatas]) # get the erased concept erased_prompts = [md["prompts"].split(",") for md in metadatas] erased_prompts_count = [len(ep) for ep in erased_prompts] print(f"Erased prompts: {erased_prompts}") erased_prompts_flatten = [item for sublist in erased_prompts for item in sublist]
DIFFUSERS_CACHE_DIR = ".cache/" UNET_NAME = "unet" TEXT_ENCODER_NAME = "text_encoder" MATCHING_METRICS = Literal[ "clipcos", "clipcos_tokenuni", "tokenuni", "allone", ] distributed_state = PartialState() accelerator = Accelerator() def flush(): torch.cuda.empty_cache() gc.collect() def parse_extra_args(extra_args): if extra_args is None or extra_args == ['']: return {} extra_args_dict = {} for extra_arg in extra_args: key, value = extra_arg.split("=") # convert value to various types if value.isdigit(): value = int(value) elif value.replace(".", "", 1).isdigit(): value = float(value) elif value[0] == "[" and value[-1] == "]": value = [i.replace('+', ' ') for i in value[1:-1].split(",")] value = [v.strip() for v in value] if value[0].isdigit(): value = [int(v) for v in value] elif value[0].replace(".", "", 1).isdigit(): value = [float(v) for v in value] extra_args_dict[key] = value return extra_args_dict def get_dataloader(args, num_processes=1): # parse task_args arguments task_args = parse_extra_args(args.task_args) task_args["save_folder"] = args.img_save_path task_args["output_path"] = args.save_path # parse generation arguments cfg = parse_extra_args(args.generation_cfg) cfg = GenerationConfig(**cfg) dataset_class = None if args.task == "general": dataset_class = ClipTemplateDataset elif args.task == "artwork": dataset_class = ArtworkDataset elif args.task == "i2p": dataset_class = I2PDataset elif args.task == "coco": dataset_class = Coco30kGenerationDataset else: raise ValueError(f"Unknown task: {args.task}") dataset = dataset_class(**task_args, base_cfg=cfg) dataloader = DataLoader(dataset, batch_size=num_processes, num_workers=0, shuffle=False) return dataloader def get_evaluator(args): evaluator_class = None if args.task == "general": evaluator_class = ClipEvaluator elif args.task == "artwork": evaluator_class = ArtworkEvaluator elif args.task == "i2p": evaluator_class = I2PEvaluator elif args.task == "coco": evaluator_class = CocoEvaluator else: raise ValueError(f"Unknown task: {args.task}") evaluator = evaluator_class( save_folder=args.img_save_path, output_path=args.save_path ) return evaluator def calculate_matching_score( prompt_tokens, prompt_embeds, erased_prompt_tokens, erased_prompt_embeds, matching_metric: MATCHING_METRICS, special_token_ids: set[int], weight_dtype: torch.dtype = torch.float32, ): scores = [] if "allone" in matching_metric: scores.append(torch.ones(prompt_embeds.shape[0]).to("cpu", dtype=weight_dtype)) if "clipcos" in matching_metric: clipcos = torch.cosine_similarity( prompt_embeds.flatten(1, 2), erased_prompt_embeds.flatten(1, 2), dim=-1 ).cpu() scores.append(clipcos) if "tokenuni" in matching_metric: prompt_set = set(prompt_tokens[0].tolist()) - special_token_ids tokenuni = [] for ep in erased_prompt_tokens: ep_set = set(ep.tolist()) - special_token_ids tokenuni.append(len(prompt_set.intersection(ep_set)) / len(ep_set)) scores.append(torch.tensor(tokenuni).to("cpu", dtype=weight_dtype)) return torch.max(torch.stack(scores), dim=0)[0] @torch.no_grad() def infer_with_spm( dataloader: DataLoader, spm_paths: list[str], matching_metric: MATCHING_METRICS, facilitate_factor: float = 1.0, assigned_multipliers: list[float] = None, finetuned_model_path: str = None, sld_target_concept: str = None, base_model: str = "CompVis/stable-diffusion-v1-4", v2: bool = False, precision: str = "fp32", ): spm_model_paths = [ lp / f"{lp.name}_last.safetensors" if lp.is_dir() else lp for lp in spm_paths ] weight_dtype = config.parse_precision(precision) if finetuned_model_path 
is not None and Path(finetuned_model_path).is_dir(): # folder path for the diffuser model base_model = finetuned_model_path print(f"Using models from {base_model}") # load the pretrained SD tokenizer, text_encoder, unet, pipe = model_util.load_checkpoint_model( base_model, v2=v2, weight_dtype=weight_dtype, device=distributed_state.device, ) special_token_ids = set( tokenizer.convert_tokens_to_ids(tokenizer.special_tokens_map.values()) ) text_encoder.to(distributed_state.device, dtype=weight_dtype) text_encoder.eval() unet.to(distributed_state.device, dtype=weight_dtype) unet.enable_xformers_memory_efficient_attention() unet.requires_grad_(False) unet.eval() if len(spm_model_paths) > 0: # load the SPM models spms, metadatas = zip( *[ load_state_dict(spm_model_path, weight_dtype) for spm_model_path in spm_model_paths ] ) # check if SPMs are compatible assert all([metadata["rank"] == metadatas[0]["rank"] for metadata in metadatas]) # get the erased concept erased_prompts = [md["prompts"].split(",") for md in metadatas] erased_prompts_count = [len(ep) for ep in erased_prompts] print(f"Erased prompts: {erased_prompts}") erased_prompts_flatten = [item for sublist in erased_prompts for item in sublist]
erased_prompt_embeds, erased_prompt_tokens = train_util.encode_prompts(
3
2023-12-26 03:19:16+00:00
16k
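The `calculate_matching_score` function in the record above combines up to three signals; its "tokenuni" term is plain set arithmetic over token ids. A minimal sketch with made-up ids follows (in the real code the ids come from the CLIP tokenizer, with special tokens stripped, and the per-prompt scores are max-reduced with the other metrics).

special_token_ids = {0, 49406, 49407}            # e.g. pad/BOS/EOS ids (assumed)
prompt_tokens = [101, 2003, 49406, 2452, 49407]  # hypothetical prompt ids
erased_prompts = [[2003, 2452, 49407], [7777, 8888]]

prompt_set = set(prompt_tokens) - special_token_ids
scores = []
for ep in erased_prompts:
    ep_set = set(ep) - special_token_ids
    # overlap with the erased concept, normalized by that concept's token count
    scores.append(len(prompt_set & ep_set) / len(ep_set))
print(scores)  # [1.0, 0.0]: full overlap with the first concept, none with the second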
dakpinaroglu/Frame2seq
frame2seq/openfold/model/structure_module.py
[ { "identifier": "Linear", "path": "frame2seq/openfold/model/primitives.py", "snippet": "class Linear(nn.Linear):\n \"\"\"\n A Linear layer with built-in nonstandard initializations. Called just\n like torch.nn.Linear.\n\n Implements the initializers in 1.11.4, plus some additional ones found...
from functools import reduce from operator import mul from typing import Optional, Tuple, Sequence from frame2seq.openfold.model.primitives import Linear, LayerNorm, ipa_point_weights_init_ from frame2seq.openfold.np.residue_constants import ( restype_rigid_group_default_frame, restype_atom14_to_rigid_group, restype_atom14_mask, restype_atom14_rigid_group_positions, ) from frame2seq.openfold.utils.feats import ( frames_and_literature_positions_to_atom14_pos, torsion_angles_to_frames, ) from frame2seq.openfold.utils.precision_utils import is_fp16_enabled from frame2seq.openfold.utils.rigid_utils import Rotation, Rigid from frame2seq.openfold.utils.tensor_utils import ( dict_multimap, permute_final_dims, flatten_final_dims, ) import importlib import math import sys import torch import torch.nn as nn
14,160
self.c_s, self.c_z, self.c_ipa, self.no_heads_ipa, self.no_qk_points, self.no_v_points, inf=self.inf, eps=self.epsilon, ) self.ipa_dropout = nn.Dropout(self.dropout_rate) self.layer_norm_ipa = LayerNorm(self.c_s) self.transition = StructureModuleTransition( self.c_s, self.no_transition_layers, self.dropout_rate, ) self.bb_update = BackboneUpdate(self.c_s) self.angle_resnet = AngleResnet( self.c_s, self.c_resnet, self.no_resnet_blocks, self.no_angles, self.epsilon, ) def forward( self, evoformer_output_dict, aatype, mask=None, inplace_safe=False, _offload_inference=False, ): """ Args: evoformer_output_dict: Dictionary containing: "single": [*, N_res, C_s] single representation "pair": [*, N_res, N_res, C_z] pair representation aatype: [*, N_res] amino acid indices mask: Optional [*, N_res] sequence mask Returns: A dictionary of outputs """ s = evoformer_output_dict["single"] if mask is None: # [*, N] mask = s.new_ones(s.shape[:-1]) # [*, N, C_s] s = self.layer_norm_s(s) # [*, N, N, C_z] z = self.layer_norm_z(evoformer_output_dict["pair"]) z_reference_list = None if(_offload_inference): assert(sys.getrefcount(evoformer_output_dict["pair"]) == 2) evoformer_output_dict["pair"] = evoformer_output_dict["pair"].cpu() z_reference_list = [z] z = None # [*, N, C_s] s_initial = s s = self.linear_in(s) # [*, N] rigids = Rigid.identity( s.shape[:-1], s.dtype, s.device, self.training, fmt="quat", ) outputs = [] for i in range(self.no_blocks): # [*, N, C_s] s = s + self.ipa( s, z, rigids, mask, inplace_safe=inplace_safe, _offload_inference=_offload_inference, _z_reference_list=z_reference_list ) s = self.ipa_dropout(s) s = self.layer_norm_ipa(s) s = self.transition(s) # [*, N] rigids = rigids.compose_q_update_vec(self.bb_update(s)) # To hew as closely as possible to AlphaFold, we convert our # quaternion-based transformations to rotation-matrix ones # here backb_to_global = Rigid( Rotation( rot_mats=rigids.get_rots().get_rot_mats(), quats=None ), rigids.get_trans(), ) backb_to_global = backb_to_global.scale_translation( self.trans_scale_factor ) # [*, N, 7, 2] unnormalized_angles, angles = self.angle_resnet(s, s_initial)
# Copyright 2021 AlQuraishi Laboratory # Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. attn_core_inplace_cuda = False  # the fused CUDA softmax kernel is not bundled here; the inplace_safe=True path in InvariantPointAttention.forward calls attn_core_inplace_cuda.forward_ and must not be taken class AngleResnetBlock(nn.Module): def __init__(self, c_hidden): """ Args: c_hidden: Hidden channel dimension """ super(AngleResnetBlock, self).__init__() self.c_hidden = c_hidden self.linear_1 = Linear(self.c_hidden, self.c_hidden, init="relu") self.linear_2 = Linear(self.c_hidden, self.c_hidden, init="final") self.relu = nn.ReLU() def forward(self, a: torch.Tensor) -> torch.Tensor: s_initial = a a = self.relu(a) a = self.linear_1(a) a = self.relu(a) a = self.linear_2(a) return a + s_initial class AngleResnet(nn.Module): """ Implements Algorithm 20, lines 11-14 """ def __init__(self, c_in, c_hidden, no_blocks, no_angles, epsilon): """ Args: c_in: Input channel dimension c_hidden: Hidden channel dimension no_blocks: Number of resnet blocks no_angles: Number of torsion angles to generate epsilon: Small constant for normalization """ super(AngleResnet, self).__init__() self.c_in = c_in self.c_hidden = c_hidden self.no_blocks = no_blocks self.no_angles = no_angles self.eps = epsilon self.linear_in = Linear(self.c_in, self.c_hidden) self.linear_initial = Linear(self.c_in, self.c_hidden) self.layers = nn.ModuleList() for _ in range(self.no_blocks): layer = AngleResnetBlock(c_hidden=self.c_hidden) self.layers.append(layer) self.linear_out = Linear(self.c_hidden, self.no_angles * 2) self.relu = nn.ReLU() def forward( self, s: torch.Tensor, s_initial: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor]: """ Args: s: [*, C_hidden] single embedding s_initial: [*, C_hidden] single embedding as of the start of the StructureModule Returns: [*, no_angles, 2] predicted angles """ # NOTE: The ReLU's applied to the inputs are absent from the supplement # pseudocode but present in the source. For maximal compatibility with # the pretrained weights, I'm going with the source. # [*, C_hidden] s_initial = self.relu(s_initial) s_initial = self.linear_initial(s_initial) s = self.relu(s) s = self.linear_in(s) s = s + s_initial for l in self.layers: s = l(s) s = self.relu(s) # [*, no_angles * 2] s = self.linear_out(s) # [*, no_angles, 2] s = s.view(s.shape[:-1] + (-1, 2)) unnormalized_s = s norm_denom = torch.sqrt( torch.clamp( torch.sum(s ** 2, dim=-1, keepdim=True), min=self.eps, ) ) s = s / norm_denom return unnormalized_s, s class InvariantPointAttention(nn.Module): """ Implements Algorithm 22.
""" def __init__( self, c_s: int, c_z: int, c_hidden: int, no_heads: int, no_qk_points: int, no_v_points: int, inf: float = 1e5, eps: float = 1e-8, ): """ Args: c_s: Single representation channel dimension c_z: Pair representation channel dimension c_hidden: Hidden channel dimension no_heads: Number of attention heads no_qk_points: Number of query/key points to generate no_v_points: Number of value points to generate """ super(InvariantPointAttention, self).__init__() self.c_s = c_s self.c_z = c_z self.c_hidden = c_hidden self.no_heads = no_heads self.no_qk_points = no_qk_points self.no_v_points = no_v_points self.inf = inf self.eps = eps # These linear layers differ from their specifications in the # supplement. There, they lack bias and use Glorot initialization. # Here as in the official source, they have bias and use the default # Lecun initialization. hc = self.c_hidden * self.no_heads self.linear_q = Linear(self.c_s, hc) self.linear_kv = Linear(self.c_s, 2 * hc) hpq = self.no_heads * self.no_qk_points * 3 self.linear_q_points = Linear(self.c_s, hpq) hpkv = self.no_heads * (self.no_qk_points + self.no_v_points) * 3 self.linear_kv_points = Linear(self.c_s, hpkv) hpv = self.no_heads * self.no_v_points * 3 self.linear_b = Linear(self.c_z, self.no_heads) self.head_weights = nn.Parameter(torch.zeros((no_heads))) ipa_point_weights_init_(self.head_weights) concat_out_dim = self.no_heads * ( self.c_z + self.c_hidden + self.no_v_points * 4 ) self.linear_out = Linear(concat_out_dim, self.c_s, init="final") self.softmax = nn.Softmax(dim=-1) self.softplus = nn.Softplus() def forward( self, s: torch.Tensor, z: Optional[torch.Tensor], r: Rigid, mask: torch.Tensor, inplace_safe: bool = False, _offload_inference: bool = False, _z_reference_list: Optional[Sequence[torch.Tensor]] = None, attn_drop_rate = 0.0, ) -> torch.Tensor: """ Args: s: [*, N_res, C_s] single representation z: [*, N_res, N_res, C_z] pair representation r: [*, N_res] transformation object mask: [*, N_res] mask Returns: [*, N_res, C_s] single representation update """ if(_offload_inference and inplace_safe): z = _z_reference_list else: z = [z] ####################################### # Generate scalar and point activations ####################################### # [*, N_res, H * C_hidden] q = self.linear_q(s) kv = self.linear_kv(s) # [*, N_res, H, C_hidden] q = q.view(q.shape[:-1] + (self.no_heads, -1)) # [*, N_res, H, 2 * C_hidden] kv = kv.view(kv.shape[:-1] + (self.no_heads, -1)) # [*, N_res, H, C_hidden] k, v = torch.split(kv, self.c_hidden, dim=-1) # [*, N_res, H * P_q * 3] q_pts = self.linear_q_points(s) # This is kind of clunky, but it's how the original does it # [*, N_res, H * P_q, 3] q_pts = torch.split(q_pts, q_pts.shape[-1] // 3, dim=-1) q_pts = torch.stack(q_pts, dim=-1) q_pts = r[..., None].apply(q_pts) # [*, N_res, H, P_q, 3] q_pts = q_pts.view( q_pts.shape[:-2] + (self.no_heads, self.no_qk_points, 3) ) # [*, N_res, H * (P_q + P_v) * 3] kv_pts = self.linear_kv_points(s) # [*, N_res, H * (P_q + P_v), 3] kv_pts = torch.split(kv_pts, kv_pts.shape[-1] // 3, dim=-1) kv_pts = torch.stack(kv_pts, dim=-1) kv_pts = r[..., None].apply(kv_pts) # [*, N_res, H, (P_q + P_v), 3] kv_pts = kv_pts.view(kv_pts.shape[:-2] + (self.no_heads, -1, 3)) # [*, N_res, H, P_q/P_v, 3] k_pts, v_pts = torch.split( kv_pts, [self.no_qk_points, self.no_v_points], dim=-2 ) ########################## # Compute attention scores ########################## # [*, N_res, N_res, H] b = self.linear_b(z[0]) if(_offload_inference): assert(sys.getrefcount(z[0]) == 2) 
z[0] = z[0].cpu() # [*, H, N_res, N_res] if(is_fp16_enabled()): with torch.cuda.amp.autocast(enabled=False): a = torch.matmul( permute_final_dims(q.float(), (1, 0, 2)), # [*, H, N_res, C_hidden] permute_final_dims(k.float(), (1, 2, 0)), # [*, H, C_hidden, N_res] ) else: a = torch.matmul( permute_final_dims(q, (1, 0, 2)), # [*, H, N_res, C_hidden] permute_final_dims(k, (1, 2, 0)), # [*, H, C_hidden, N_res] ) a *= math.sqrt(1.0 / (3 * self.c_hidden)) a += (math.sqrt(1.0 / 3) * permute_final_dims(b, (2, 0, 1))) # [*, N_res, N_res, H, P_q, 3] pt_att = q_pts.unsqueeze(-4) - k_pts.unsqueeze(-5) if(inplace_safe): pt_att *= pt_att else: pt_att = pt_att ** 2 # [*, N_res, N_res, H, P_q] pt_att = sum(torch.unbind(pt_att, dim=-1)) head_weights = self.softplus(self.head_weights).view( *((1,) * len(pt_att.shape[:-2]) + (-1, 1)) ) head_weights = head_weights * math.sqrt( 1.0 / (3 * (self.no_qk_points * 9.0 / 2)) ) if(inplace_safe): pt_att *= head_weights else: pt_att = pt_att * head_weights # [*, N_res, N_res, H] pt_att = torch.sum(pt_att, dim=-1) * (-0.5) # [*, N_res, N_res] square_mask = mask.unsqueeze(-1) * mask.unsqueeze(-2) square_mask = self.inf * (square_mask - 1) """ Frame2seq implementation of IPA regularization via attention dropout """ if attn_drop_rate > 0.0: random_square_mask = torch.rand(square_mask.shape, device=square_mask.device) random_square_mask = self.inf * -1 * (random_square_mask < attn_drop_rate) square_mask += random_square_mask # [*, H, N_res, N_res] pt_att = permute_final_dims(pt_att, (2, 0, 1)) if(inplace_safe): a += pt_att del pt_att a += square_mask.unsqueeze(-3) # in-place softmax attn_core_inplace_cuda.forward_( a, reduce(mul, a.shape[:-1]), a.shape[-1], ) else: a = a + pt_att a = a + square_mask.unsqueeze(-3) a = self.softmax(a) ################ # Compute output ################ # [*, N_res, H, C_hidden] o = torch.matmul( a, v.transpose(-2, -3).to(dtype=a.dtype) ).transpose(-2, -3) # [*, N_res, H * C_hidden] o = flatten_final_dims(o, 2) # [*, H, 3, N_res, P_v] if(inplace_safe): v_pts = permute_final_dims(v_pts, (1, 3, 0, 2)) o_pt = [ torch.matmul(a, v.to(a.dtype)) for v in torch.unbind(v_pts, dim=-3) ] o_pt = torch.stack(o_pt, dim=-3) else: o_pt = torch.sum( ( a[..., None, :, :, None] * permute_final_dims(v_pts, (1, 3, 0, 2))[..., None, :, :] ), dim=-2, ) # [*, N_res, H, P_v, 3] o_pt = permute_final_dims(o_pt, (2, 0, 3, 1)) o_pt = r[..., None, None].invert_apply(o_pt) # [*, N_res, H * P_v] o_pt_norm = flatten_final_dims( torch.sqrt(torch.sum(o_pt ** 2, dim=-1) + self.eps), 2 ) # [*, N_res, H * P_v, 3] o_pt = o_pt.reshape(*o_pt.shape[:-3], -1, 3) if(_offload_inference): z[0] = z[0].to(o_pt.device) # [*, N_res, H, C_z] o_pair = torch.matmul(a.transpose(-2, -3), z[0].to(dtype=a.dtype)) # [*, N_res, H * C_z] o_pair = flatten_final_dims(o_pair, 2) # [*, N_res, C_s] s = self.linear_out( torch.cat( (o, *torch.unbind(o_pt, dim=-1), o_pt_norm, o_pair), dim=-1 ).to(dtype=z[0].dtype) ) return s class BackboneUpdate(nn.Module): """ Implements part of Algorithm 23. 
""" def __init__(self, c_s): """ Args: c_s: Single representation channel dimension """ super(BackboneUpdate, self).__init__() self.c_s = c_s self.linear = Linear(self.c_s, 6, init="final") def forward(self, s: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: """ Args: [*, N_res, C_s] single representation Returns: [*, N_res, 6] update vector """ # [*, 6] update = self.linear(s) return update class StructureModuleTransitionLayer(nn.Module): def __init__(self, c): super(StructureModuleTransitionLayer, self).__init__() self.c = c self.linear_1 = Linear(self.c, self.c, init="relu") self.linear_2 = Linear(self.c, self.c, init="relu") self.linear_3 = Linear(self.c, self.c, init="final") self.relu = nn.ReLU() def forward(self, s): s_initial = s s = self.linear_1(s) s = self.relu(s) s = self.linear_2(s) s = self.relu(s) s = self.linear_3(s) s = s + s_initial return s class StructureModuleTransition(nn.Module): def __init__(self, c, num_layers, dropout_rate): super(StructureModuleTransition, self).__init__() self.c = c self.num_layers = num_layers self.dropout_rate = dropout_rate self.layers = nn.ModuleList() for _ in range(self.num_layers): l = StructureModuleTransitionLayer(self.c) self.layers.append(l) self.dropout = nn.Dropout(self.dropout_rate) self.layer_norm = LayerNorm(self.c) def forward(self, s): for l in self.layers: s = l(s) s = self.dropout(s) s = self.layer_norm(s) return s class StructureModule(nn.Module): def __init__( self, c_s, c_z, c_ipa, c_resnet, no_heads_ipa, no_qk_points, no_v_points, dropout_rate, no_blocks, no_transition_layers, no_resnet_blocks, no_angles, trans_scale_factor, epsilon, inf, **kwargs, ): """ Args: c_s: Single representation channel dimension c_z: Pair representation channel dimension c_ipa: IPA hidden channel dimension c_resnet: Angle resnet (Alg. 23 lines 11-14) hidden channel dimension no_heads_ipa: Number of IPA heads no_qk_points: Number of query/key points to generate during IPA no_v_points: Number of value points to generate during IPA dropout_rate: Dropout rate used throughout the layer no_blocks: Number of structure module blocks no_transition_layers: Number of layers in the single representation transition (Alg. 
23 lines 8-9) no_resnet_blocks: Number of blocks in the angle resnet no_angles: Number of angles to generate in the angle resnet trans_scale_factor: Scale of single representation transition hidden dimension epsilon: Small number used in angle resnet normalization inf: Large number used for attention masking """ super(StructureModule, self).__init__() self.c_s = c_s self.c_z = c_z self.c_ipa = c_ipa self.c_resnet = c_resnet self.no_heads_ipa = no_heads_ipa self.no_qk_points = no_qk_points self.no_v_points = no_v_points self.dropout_rate = dropout_rate self.no_blocks = no_blocks self.no_transition_layers = no_transition_layers self.no_resnet_blocks = no_resnet_blocks self.no_angles = no_angles self.trans_scale_factor = trans_scale_factor self.epsilon = epsilon self.inf = inf # Buffers to be lazily initialized later # self.default_frames # self.group_idx # self.atom_mask # self.lit_positions self.layer_norm_s = LayerNorm(self.c_s) self.layer_norm_z = LayerNorm(self.c_z) self.linear_in = Linear(self.c_s, self.c_s) self.ipa = InvariantPointAttention( self.c_s, self.c_z, self.c_ipa, self.no_heads_ipa, self.no_qk_points, self.no_v_points, inf=self.inf, eps=self.epsilon, ) self.ipa_dropout = nn.Dropout(self.dropout_rate) self.layer_norm_ipa = LayerNorm(self.c_s) self.transition = StructureModuleTransition( self.c_s, self.no_transition_layers, self.dropout_rate, ) self.bb_update = BackboneUpdate(self.c_s) self.angle_resnet = AngleResnet( self.c_s, self.c_resnet, self.no_resnet_blocks, self.no_angles, self.epsilon, ) def forward( self, evoformer_output_dict, aatype, mask=None, inplace_safe=False, _offload_inference=False, ): """ Args: evoformer_output_dict: Dictionary containing: "single": [*, N_res, C_s] single representation "pair": [*, N_res, N_res, C_z] pair representation aatype: [*, N_res] amino acid indices mask: Optional [*, N_res] sequence mask Returns: A dictionary of outputs """ s = evoformer_output_dict["single"] if mask is None: # [*, N] mask = s.new_ones(s.shape[:-1]) # [*, N, C_s] s = self.layer_norm_s(s) # [*, N, N, C_z] z = self.layer_norm_z(evoformer_output_dict["pair"]) z_reference_list = None if(_offload_inference): assert(sys.getrefcount(evoformer_output_dict["pair"]) == 2) evoformer_output_dict["pair"] = evoformer_output_dict["pair"].cpu() z_reference_list = [z] z = None # [*, N, C_s] s_initial = s s = self.linear_in(s) # [*, N] rigids = Rigid.identity( s.shape[:-1], s.dtype, s.device, self.training, fmt="quat", ) outputs = [] for i in range(self.no_blocks): # [*, N, C_s] s = s + self.ipa( s, z, rigids, mask, inplace_safe=inplace_safe, _offload_inference=_offload_inference, _z_reference_list=z_reference_list ) s = self.ipa_dropout(s) s = self.layer_norm_ipa(s) s = self.transition(s) # [*, N] rigids = rigids.compose_q_update_vec(self.bb_update(s)) # To hew as closely as possible to AlphaFold, we convert our # quaternion-based transformations to rotation-matrix ones # here backb_to_global = Rigid( Rotation( rot_mats=rigids.get_rots().get_rot_mats(), quats=None ), rigids.get_trans(), ) backb_to_global = backb_to_global.scale_translation( self.trans_scale_factor ) # [*, N, 7, 2] unnormalized_angles, angles = self.angle_resnet(s, s_initial)
all_frames_to_global = self.torsion_angles_to_frames(
5
2023-12-25 09:29:36+00:00
16k
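The next_line target above opens the sidechain-frame construction step of the structure module (AlphaFold Algorithm 24). A minimal sketch of how this per-block tail typically continues in OpenFold-style code follows; the helper names after the first call, the dictionary keys, and the comments are assumptions for illustration, not fields of this record.

            # Sketch of a plausible continuation (assumed, not from this record).
            all_frames_to_global = self.torsion_angles_to_frames(
                backb_to_global,  # backbone rigids, translations in Angstroms
                angles,           # [*, N, 7, 2] normalized torsion angles
                aatype,           # [*, N] residue indices selecting frames
            )

            # Place literature atom positions with the predicted frames.
            pred_xyz = self.frames_and_literature_positions_to_atom14_pos(
                all_frames_to_global,
                aatype,
            )

            preds = {
                "frames": backb_to_global.to_tensor_7(),
                "sidechain_frames": all_frames_to_global.to_tensor_4x4(),
                "unnormalized_angles": unnormalized_angles,
                "angles": angles,
                "positions": pred_xyz,
            }
            outputs.append(preds)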
KyanChen/TTP
mmpretrain/models/multimodal/clip/clip.py
[ { "identifier": "CIFAR100_CATEGORIES", "path": "mmpretrain/datasets/categories.py", "snippet": "CIFAR100_CATEGORIES = (\n 'apple', 'aquarium_fish', 'baby', 'bear', 'beaver', 'bed', 'bee', 'beetle',\n 'bicycle', 'bottle', 'bowl', 'boy', 'bridge', 'bus', 'butterfly', 'camel',\n 'can', 'castle', '...
from abc import abstractmethod
from typing import List, Optional, Tuple, Union
from mmengine.model import BaseModel
from torch import nn
from mmpretrain.datasets.categories import (CIFAR100_CATEGORIES,
                                            IMAGENET_SIMPLE_CATEGORIES)
from mmpretrain.registry import MODELS, TOKENIZER
from mmpretrain.structures import DataSample
from mmpretrain.utils import track_on_main_process
from .utils import (OPENAI_CIFAR100_PROMPT, OPENAI_IMAGENET_PROMPT,
                    OPENAI_IMAGENET_PROMPT_SUB)
import numpy as np
import torch
import torch.nn.functional as F
11,382
                data_samples: DataSample = None) -> DataSample:
        raise NotImplementedError

    def tokenize(self, texts: Union[str, List[str]]) -> torch.LongTensor:
        """Returns the tokenized representation of given input string(s).

        Args:
            texts (Union[str, List[str]]): An input string or a list of
                input strings to tokenize.

        Returns:
            torch.Tensor: Resulting tokens, truncated and padded to
                ``self.context_length``.
        """
        if isinstance(texts, str):
            texts = [texts]

        all_tokens = []
        for text in texts:
            # adapt the text to Chinese BERT vocab
            # text = text.lower().replace('“', "\"").replace('”', "\"")

            # add special tokens
            all_tokens.append(
                [self.tokenizer.vocab['<|startoftext|>']
                 ] +  # <|startoftext|> corresponds to the [CLS] token
                self.tokenizer.convert_tokens_to_ids(
                    self.tokenizer.tokenize(text))[:self.context_length - 2] +
                [self.tokenizer.vocab['<|endoftext|>']])

        result = torch.zeros(
            len(all_tokens), self.context_length, dtype=torch.long)

        for i, tokens in enumerate(all_tokens):
            assert len(tokens) <= self.context_length
            result[i, :len(tokens)] = torch.tensor(tokens)

        return result


@MODELS.register_module()
class CLIPZeroShot(CLIP):

    def __init__(
        self,
        vision_backbone: dict,
        projection: dict,
        text_backbone: dict,
        tokenizer: dict,
        vocab_size: int,
        transformer_width: int,
        proj_dim: int,
        context_length: int = 77,
        data_preprocessor: Optional[dict] = None,
        init_cfg: Optional[dict] = None,
        text_prototype: Union[str, List[str]] = 'imagenet',
        text_prompt: str = 'vanilla',
    ):
        super(CLIPZeroShot,
              self).__init__(vision_backbone, projection, text_backbone,
                             tokenizer, vocab_size, transformer_width,
                             proj_dim, context_length, data_preprocessor,
                             init_cfg)

        # for zero-shot classification
        if isinstance(text_prototype,
                      str) and text_prototype in PROTOTYPE_MAP.keys():
            self.prototype = PROTOTYPE_MAP[text_prototype]
        else:
            self.prototype = text_prototype
        self.text_prototype_embeds = None

        self.prompt = PROMPT_MAP[text_prompt]

    def predict(self,
                images: torch.Tensor,
                data_samples: DataSample = None) -> DataSample:
        """Predict the classes of the input images.

        The prediction is for zero-shot classification and the text
        prototypes will be prepared in this function.

        Args:
            images (torch.Tensor): The input images.
            data_samples (DataSample): The data samples with information
                from the dataset.

        Returns:
            DataSample: The results of prediction.
        """
        if self.text_prototype_embeds is None:
            self.prepare_text_prototype(device=images.device)

        image_features = self.extract_image_feat(images=images)
        image_features /= image_features.norm(dim=-1, keepdim=True)

        # cosine similarity as logits
        logits_per_image = image_features @ self.text_prototype_embeds.to(
            image_features.device) * self.logit_scale.exp()

        pred_scores = F.softmax(logits_per_image, dim=1)
        pred_labels = pred_scores.argmax(dim=1, keepdim=True).detach()

        out_data_samples = []
        if data_samples is None:
            data_samples = [None for _ in range(pred_scores.size(0))]

        for data_sample, score, label in zip(data_samples, pred_scores,
                                             pred_labels):
            if data_sample is None:
                data_sample = DataSample()

            data_sample.set_pred_score(score).set_pred_label(label)
            out_data_samples.append(data_sample)
        return out_data_samples

    def prepare_text_prototype(self, device) -> None:
        """The function to prepare text prototypes with prompt."""
        class_embeddings = []
# Copyright (c) OpenMMLab. All rights reserved.
CIFAR100_CATEGORIES = [' '.join(c.split('_')) for c in CIFAR100_CATEGORIES]

PROTOTYPE_MAP = {
    'imagenet': IMAGENET_SIMPLE_CATEGORIES,
    'cifar100': CIFAR100_CATEGORIES,
}
PROMPT_MAP = {
    'openai_imagenet': OPENAI_IMAGENET_PROMPT,
    'openai_cifar100': OPENAI_CIFAR100_PROMPT,
    'vanilla': [lambda c: f'a photo of a {c}'],
    'openai_imagenet_sub': OPENAI_IMAGENET_PROMPT_SUB
}


class LayerNorm(nn.LayerNorm):
    """Subclass torch's LayerNorm to handle fp16."""

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Forward function."""
        orig_type = x.dtype
        ret = super().forward(x.type(torch.float32))
        return ret.type(orig_type)


class CLIP(BaseModel):
    """The implementation of `CLIP <https://arxiv.org/abs/2103.00020>`_.

    Args:
        vision_backbone (dict): Config dict for vision backbone.
        text_backbone (dict): Config dict for text backbone.
        tokenizer (dict): Config dict for text tokenizer.
        proj_dim (int): Projection dimension for similarity computation.
        text_prototype (str): Text prototype, which can be a key in
            `PROTOTYPE_MAP` or a list of text.
        text_prompt (str): The prompt for the text prototype.
            Defaults to 'vanilla', which refers to "a photo of {cls}".
        context_length (int): The context length to use. Defaults to 77.
        data_preprocessor (Union[dict, nn.Module], optional): The config for
            preprocessing input data. If None or no specified type, it will
            use "MultiModalDataPreprocessor" as type.
            See :class:`MultiModalDataPreprocessor` for more details.
            Defaults to None.
        init_cfg (dict, optional): The config to control the initialization.
            Defaults to None.
    """

    def __init__(self,
                 vision_backbone: dict,
                 projection: dict,
                 text_backbone: dict,
                 tokenizer: dict,
                 vocab_size: int,
                 transformer_width: int,
                 proj_dim: int,
                 context_length: int = 77,
                 data_preprocessor: Optional[dict] = None,
                 init_cfg: Optional[dict] = None):
        if data_preprocessor is None:
            data_preprocessor = {}
        data_preprocessor.setdefault('type', 'MultiModalDataPreprocessor')
        data_preprocessor = MODELS.build(data_preprocessor)

        super().__init__(
            data_preprocessor=data_preprocessor, init_cfg=init_cfg)

        self.context_length = context_length

        # build the vision transformer
        self.visual = MODELS.build(vision_backbone)

        # build the visual projection
        self.visual_proj = MODELS.build(projection)

        # build attn_mask for causal-attn
        text_backbone['attn_mask'] = self.build_attention_mask()

        # build the text transformer
        self.transformer = MODELS.build(text_backbone)

        self.vocab_size = vocab_size
        self.token_embedding = nn.Embedding(vocab_size, transformer_width)
        self.positional_embedding = nn.Parameter(
            torch.empty(self.context_length, transformer_width))
        self.ln_final = LayerNorm(transformer_width)

        self.text_projection = nn.Parameter(
            torch.empty(transformer_width, proj_dim))
        self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))

        self.initialize_parameters()

        self.tokenizer = TOKENIZER.build(tokenizer)

        self.tokenizer.vocab = self.tokenizer.get_vocab(
        )  # CLIPTokenizer has no attribute named 'vocab', so set it manually

    def initialize_parameters(self) -> None:
        """Initialize the parameters.

        The pretrained weight will override the initialized parameters by
        this function.
        """
        nn.init.normal_(self.token_embedding.weight, std=0.02)
        nn.init.normal_(self.positional_embedding, std=0.01)

        proj_std = (self.transformer.width**-0.5) * (
            (2 * self.transformer.layers)**-0.5)
        attn_std = self.transformer.width**-0.5
        fc_std = (2 * self.transformer.width)**-0.5
        for block in self.transformer.resblocks:
            nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
            nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
            nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
            nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)

        if self.text_projection is not None:
            nn.init.normal_(
                self.text_projection, std=self.transformer.width**-0.5)

    def build_attention_mask(self):
        # lazily create causal attention mask,
        # with full attention between the vision tokens
        # pytorch uses additive attention mask; fill with -inf
        mask = torch.empty(self.context_length, self.context_length)
        mask.fill_(float('-inf'))
        mask.triu_(1)  # zero out the lower diagonal
        return mask

    def forward(
        self,
        images: torch.Tensor,
        data_samples: Optional[list] = None,
        mode: str = 'predict',
        **kwargs,
    ):
        """The unified entry for a forward process in both training and test.

        The method accepts the following modes:

        - "predict": Forward and return a list of data samples containing
          the predict results.

        Args:
            images (torch.Tensor): the preprocessed image tensor of shape
                ``(N, C, H, W)``.
            data_samples (List[DataSample], optional): The annotation data
                of every sample. Defaults to None.
            mode (str): Return what kind of value. Defaults to 'predict'.
        """
        if mode == 'predict':
            return self.predict(images, data_samples, **kwargs)
        else:
            raise RuntimeError(f'Invalid mode "{mode}".')

    def extract_image_feat(self, images: torch.Tensor) -> torch.Tensor:
        """The function to extract image latent features."""
        return self.visual_proj(self.visual(images))[0]

    def extract_text_feat(self, texts: torch.Tensor) -> torch.Tensor:
        """The function to extract text latent features."""
        x = self.token_embedding(texts)  # [batch_size, n_ctx, d_model]

        x = x + self.positional_embedding
        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x)[0]
        x = x.permute(1, 0, 2)  # LND -> NLD
        x = self.ln_final(x)

        # x.shape = [batch_size, n_ctx, transformer.width]
        # take features from the eot embedding
        # (eot_token is the highest number in each sequence)
        x = x[torch.arange(x.shape[0]),
              texts.argmax(dim=-1)] @ self.text_projection

        return x

    def extract_feat(
            self, images: torch.Tensor,
            texts: torch.Tensor) -> Union[torch.Tensor, Tuple[torch.Tensor]]:
        """The function to extract image and text latent features; the input
        image and text cannot both be None."""

        assert images is not None or texts is not None, \
            'text and image cannot both be None!'
        if images is None:
            return self.extract_text_feat(texts)
        elif texts is None:
            return self.extract_image_feat(images)

        image_features = self.extract_image_feat(images)
        text_features = self.extract_text_feat(texts)

        image_features = image_features / image_features.norm(
            dim=-1, keepdim=True)
        text_features = text_features / text_features.norm(
            dim=-1, keepdim=True)

        return image_features, text_features

    def compute_similarity(self, images, texts):
        """Extract images and texts features and compute cosine
        similarity."""
        image_features, text_features = self.extract_feat(
            images=images, texts=texts)

        # cosine similarity as logits
        logit_scale = self.logit_scale.exp()
        logits_per_image = logit_scale * image_features @ text_features.t()
        logits_per_text = logits_per_image.t()

        # shape (N, N)
        return logits_per_image, logits_per_text

    @abstractmethod
    def predict(self,
                images: torch.Tensor,
                data_samples: DataSample = None) -> DataSample:
        raise NotImplementedError

    def tokenize(self, texts: Union[str, List[str]]) -> torch.LongTensor:
        """Returns the tokenized representation of given input string(s).

        Args:
            texts (Union[str, List[str]]): An input string or a list of
                input strings to tokenize.

        Returns:
            torch.Tensor: Resulting tokens, truncated and padded to
                ``self.context_length``.
        """
        if isinstance(texts, str):
            texts = [texts]

        all_tokens = []
        for text in texts:
            # adapt the text to Chinese BERT vocab
            # text = text.lower().replace('“', "\"").replace('”', "\"")

            # add special tokens
            all_tokens.append(
                [self.tokenizer.vocab['<|startoftext|>']
                 ] +  # <|startoftext|> corresponds to the [CLS] token
                self.tokenizer.convert_tokens_to_ids(
                    self.tokenizer.tokenize(text))[:self.context_length - 2] +
                [self.tokenizer.vocab['<|endoftext|>']])

        result = torch.zeros(
            len(all_tokens), self.context_length, dtype=torch.long)

        for i, tokens in enumerate(all_tokens):
            assert len(tokens) <= self.context_length
            result[i, :len(tokens)] = torch.tensor(tokens)

        return result


@MODELS.register_module()
class CLIPZeroShot(CLIP):

    def __init__(
        self,
        vision_backbone: dict,
        projection: dict,
        text_backbone: dict,
        tokenizer: dict,
        vocab_size: int,
        transformer_width: int,
        proj_dim: int,
        context_length: int = 77,
        data_preprocessor: Optional[dict] = None,
        init_cfg: Optional[dict] = None,
        text_prototype: Union[str, List[str]] = 'imagenet',
        text_prompt: str = 'vanilla',
    ):
        super(CLIPZeroShot,
              self).__init__(vision_backbone, projection, text_backbone,
                             tokenizer, vocab_size, transformer_width,
                             proj_dim, context_length, data_preprocessor,
                             init_cfg)

        # for zero-shot classification
        if isinstance(text_prototype,
                      str) and text_prototype in PROTOTYPE_MAP.keys():
            self.prototype = PROTOTYPE_MAP[text_prototype]
        else:
            self.prototype = text_prototype
        self.text_prototype_embeds = None

        self.prompt = PROMPT_MAP[text_prompt]

    def predict(self,
                images: torch.Tensor,
                data_samples: DataSample = None) -> DataSample:
        """Predict the classes of the input images.

        The prediction is for zero-shot classification and the text
        prototypes will be prepared in this function.

        Args:
            images (torch.Tensor): The input images.
            data_samples (DataSample): The data samples with information
                from the dataset.

        Returns:
            DataSample: The results of prediction.
        """
        if self.text_prototype_embeds is None:
            self.prepare_text_prototype(device=images.device)

        image_features = self.extract_image_feat(images=images)
        image_features /= image_features.norm(dim=-1, keepdim=True)

        # cosine similarity as logits
        logits_per_image = image_features @ self.text_prototype_embeds.to(
            image_features.device) * self.logit_scale.exp()

        pred_scores = F.softmax(logits_per_image, dim=1)
        pred_labels = pred_scores.argmax(dim=1, keepdim=True).detach()

        out_data_samples = []
        if data_samples is None:
            data_samples = [None for _ in range(pred_scores.size(0))]

        for data_sample, score, label in zip(data_samples, pred_scores,
                                             pred_labels):
            if data_sample is None:
                data_sample = DataSample()

            data_sample.set_pred_score(score).set_pred_label(label)
            out_data_samples.append(data_sample)
        return out_data_samples

    def prepare_text_prototype(self, device) -> None:
        """The function to prepare text prototypes with prompt."""
        class_embeddings = []
for classname in track_on_main_process(self.prototype,
5
2023-12-23 08:36:47+00:00
16k
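The next_line target of this record starts the prompt-ensembling loop inside prepare_text_prototype. A plausible completion in the style of OpenAI's zero-shot CLIP recipe is sketched below; the progress-bar description string and the exact normalization order are assumptions, not taken from the record.

        # Sketch of a plausible loop body (assumed, not from this record).
        for classname in track_on_main_process(self.prototype,
                                               'Prepare text prototype...'):
            # Render every prompt template with the class name,
            # e.g. 'a photo of a {c}' -> 'a photo of a goldfish'.
            texts = self.tokenize(
                [prompt(classname) for prompt in self.prompt])
            class_features = self.extract_text_feat(texts.to(device))

            # Average the per-prompt embeddings into one prototype per class.
            class_features /= class_features.norm(dim=-1, keepdim=True)
            class_feature = class_features.mean(dim=0)
            class_feature /= class_feature.norm()
            class_embeddings.append(class_feature)

        # [proj_dim, num_classes], so image_features @ prototypes in
        # predict() yields per-class logits directly.
        self.text_prototype_embeds = torch.stack(
            class_embeddings, dim=1).to(device)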