repo_name stringlengths 7 71 | file_path stringlengths 5 118 | context list | import_statement stringlengths 45 12.5k | token_num int64 641 99.4k | cropped_code stringlengths 44 17k | all_code stringlengths 43 754k | next_line stringlengths 2 330 | gold_snippet_index int64 0 68 | created_at stringlengths 25 25 | level stringclasses 9
values |
|---|---|---|---|---|---|---|---|---|---|---|
Secilia-Cxy/UNetTFI | train.py | [
{
"identifier": "recall_precision_f1_acc",
"path": "utils/evaluate.py",
"snippet": "def recall_precision_f1_acc(y=None, y_hat=None, cm=None):\n \"\"\" returns metrics for recall, precision, f1, accuracy\n\n Args:\n y (numpy array): ground truth \n y_hat (numpy array): prediction \n\n... | import argparse
import copy
import numpy as np
import pytorch_lightning as pl
import datetime
import os
import torch
import torch.nn.functional as F
import wandb
from pytorch_lightning.callbacks import ModelCheckpoint, ModelSummary
from pytorch_lightning.plugins import DDPPlugin
from torch.utils.data import DataLoader, ConcatDataset
from pytorch_lightning import loggers as pl_loggers
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from utils.evaluate import recall_precision_f1_acc, get_confusion_matrix
from models.unet_lightning_w4c23 import UNet_Lightning as UNetModel
from utils.data_utils import load_config
from utils.data_utils import get_cuda_memory_usage
from utils.data_utils import tensor_to_submission_file
from utils.data_utils import get_dict_value
from utils.w4c_dataloader import RainData
from utils.evaluate import to_one_hot | 9,492 | def val_dataloader(self):
return self.__load_dataloader(self.val_ds, shuffle=False, pin=True)
def test_dataloader(self):
return self.__load_dataloader(self.test_ds, shuffle=False, pin=True)
def load_model(Model, params, checkpoint_path='') -> pl.LightningModule:
""" loads a model from a checkpoint or from scratch if checkpoint_path='' """
p = {**params['experiment'], **params['dataset'], **params['train']}
if checkpoint_path == '':
print('-> Modelling from scratch! (no checkpoint loaded)')
model = Model(params['model'], p)
else:
print(f'-> Loading model checkpoint: {checkpoint_path}')
model = Model.load_from_checkpoint(checkpoint_path, UNet_params=params['model'], params=p)
return model
def get_trainer(gpus, params, mode):
""" get the trainer, modify here its options:
- save_top_k
"""
max_epochs = params['train']['max_epochs']
# max_epochs = 1
print("Trainig for", max_epochs, "epochs")
checkpoint_callback = ModelCheckpoint(monitor='val_loss_epoch', save_top_k=90, save_last=True,
filename='{epoch:02d}-{val_loss_epoch:.6f}')
parallel_training = None
ddpplugin = None
if gpus[0] == -1:
gpus = None
elif len(gpus) > 1:
parallel_training = 'ddp'
## ddpplugin = DDPPlugin(find_unused_parameters=True)
print(f"====== process started on the following GPUs: {gpus} ======")
date_time = datetime.datetime.now().strftime("%m%d-%H:%M")
version = params['experiment']['name']
version = version + '_' + date_time
# SET LOGGER
# if params['experiment']['logging']:
# tb_logger = pl_loggers.TensorBoardLogger(save_dir=params['experiment']['experiment_folder'],name=params['experiment']['sub_folder'], version=version, log_graph=True)
# else:
# tb_logger = False
if params['experiment']['logging'] and mode != "predict" and mode != "val":
# Create a WandbLogger instead of TensorBoardLogger
wandb_logger = WandbLogger(
project='w4c23',
save_dir=params['experiment']['experiment_folder'],
name=params['experiment']['sub_folder'],
)
else:
wandb_logger = False
if mode == "predict" or mode == "val" or len(gpus) <= 1:
strategy = None
else:
strategy = "ddp"
if params['train']['early_stopping']:
early_stop_callback = EarlyStopping(monitor="val_loss_epoch",
patience=params['train']['patience'],
mode="min")
callback_funcs = [checkpoint_callback, ModelSummary(max_depth=2), early_stop_callback]
else:
callback_funcs = [checkpoint_callback, ModelSummary(max_depth=2)]
trainer = pl.Trainer(devices=gpus, max_epochs=max_epochs,
gradient_clip_val=params['model']['gradient_clip_val'],
gradient_clip_algorithm=params['model']['gradient_clip_algorithm'],
accelerator="gpu",
callbacks=callback_funcs, logger=wandb_logger,
# profiler='simple',
# fast_dev_run=3,
# log_every_n_steps=1,
precision=params['experiment']['precision'],
strategy=strategy
)
return trainer
def to_number(y_hat, nums=None, thres=None):
if nums is None:
nums = torch.tensor([0, 0.6, 3, 7.5, 12.5, 16]).reshape(1, 6, 1, 1, 1).to(y_hat.device)
num_classes = 6
y_hat = F.softmax(y_hat, dim=1)
if thres is not None:
y_sum = 1 - torch.cumsum(y_hat, dim=1)
y_hat = torch.argmax((y_sum < torch.tensor(thres + [2], device=y_sum.device).reshape(1, 6, 1, 1, 1)).long(),
dim=1)
else:
y_hat = torch.argmax(y_hat, dim=1)
y_hat = F.one_hot(y_hat, num_classes=num_classes).permute(0, 4, 1, 2, 3)
ret = torch.sum(y_hat * nums, axis=1, keepdim=True)
return y_hat, ret
def do_predict(trainer, model, predict_params, test_data):
ret = 0
test_batch = trainer.predict(model, dataloaders=test_data)
scores = torch.cat([b[0] for b in test_batch])
_, scores = to_number(scores)
tensor_to_submission_file(scores, predict_params)
return ret
def do_test(trainer, model, test_data):
scores = trainer.test(model, dataloaders=test_data)
def do_val(trainer, model, test_data):
scores = trainer.validate(model, dataloaders=test_data)
def train(params, gpus, mode, checkpoint_path, model=UNetModel, tune=True):
""" main training/evaluation method
"""
# ------------
# model & data
# ------------
| # Weather4cast 2023 Starter Kit
#
# This Starter Kit builds on and extends the Weather4cast 2022 Starter Kit,
# the original license for which is included below.
#
# In line with the provisions of this license, all changes and additional
# code are also released unde the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# Weather4cast 2022 Starter Kit
#
# Copyright (C) 2022
# Institute of Advanced Research in Artificial Intelligence (IARAI)
# This file is part of the Weather4cast 2022 Starter Kit.
#
# The Weather4cast 2022 Starter Kit is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# The Weather4cast 2022 Starter Kit is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Contributors: Aleksandra Gruca, Pedro Herruzo, David Kreil, Stephen Moran
class DataModule(pl.LightningDataModule):
""" Class to handle training/validation splits in a single object
"""
def __init__(self, params, training_params, mode):
super().__init__()
self.params = params
self.training_params = training_params
concat_train_val = get_dict_value(training_params, 'concat_train_val', False)
print("----------------------- concat_train_val: ", concat_train_val)
if mode in ['train']:
print("Loading TRAINING/VALIDATION dataset -- as test")
if concat_train_val:
self.val_ds = RainData('validation', **self.params)
self.train_ds = ConcatDataset([RainData('training', **self.params), self.val_ds])
else:
self.train_ds = RainData('training', **self.params)
self.val_ds = RainData('validation', **self.params)
print(f"Training dataset size: {len(self.train_ds)}")
if mode in ['val']:
print("Loading VALIDATION dataset -- as test")
self.val_ds = RainData('validation', **self.params)
if mode in ['predict']:
print("Loading PREDICTION/TEST dataset -- as test")
self.test_ds = RainData('test', **self.params)
def __load_dataloader(self, dataset, shuffle=True, pin=True):
dl = DataLoader(dataset,
batch_size=self.training_params['batch_size'],
num_workers=self.training_params['n_workers'],
shuffle=shuffle,
pin_memory=pin, prefetch_factor=2,
persistent_workers=False)
return dl
def train_dataloader(self):
return self.__load_dataloader(self.train_ds, shuffle=True, pin=True)
def val_dataloader(self):
return self.__load_dataloader(self.val_ds, shuffle=False, pin=True)
def test_dataloader(self):
return self.__load_dataloader(self.test_ds, shuffle=False, pin=True)
def load_model(Model, params, checkpoint_path='') -> pl.LightningModule:
""" loads a model from a checkpoint or from scratch if checkpoint_path='' """
p = {**params['experiment'], **params['dataset'], **params['train']}
if checkpoint_path == '':
print('-> Modelling from scratch! (no checkpoint loaded)')
model = Model(params['model'], p)
else:
print(f'-> Loading model checkpoint: {checkpoint_path}')
model = Model.load_from_checkpoint(checkpoint_path, UNet_params=params['model'], params=p)
return model
def get_trainer(gpus, params, mode):
""" get the trainer, modify here its options:
- save_top_k
"""
max_epochs = params['train']['max_epochs']
# max_epochs = 1
print("Trainig for", max_epochs, "epochs")
checkpoint_callback = ModelCheckpoint(monitor='val_loss_epoch', save_top_k=90, save_last=True,
filename='{epoch:02d}-{val_loss_epoch:.6f}')
parallel_training = None
ddpplugin = None
if gpus[0] == -1:
gpus = None
elif len(gpus) > 1:
parallel_training = 'ddp'
## ddpplugin = DDPPlugin(find_unused_parameters=True)
print(f"====== process started on the following GPUs: {gpus} ======")
date_time = datetime.datetime.now().strftime("%m%d-%H:%M")
version = params['experiment']['name']
version = version + '_' + date_time
# SET LOGGER
# if params['experiment']['logging']:
# tb_logger = pl_loggers.TensorBoardLogger(save_dir=params['experiment']['experiment_folder'],name=params['experiment']['sub_folder'], version=version, log_graph=True)
# else:
# tb_logger = False
if params['experiment']['logging'] and mode != "predict" and mode != "val":
# Create a WandbLogger instead of TensorBoardLogger
wandb_logger = WandbLogger(
project='w4c23',
save_dir=params['experiment']['experiment_folder'],
name=params['experiment']['sub_folder'],
)
else:
wandb_logger = False
if mode == "predict" or mode == "val" or len(gpus) <= 1:
strategy = None
else:
strategy = "ddp"
if params['train']['early_stopping']:
early_stop_callback = EarlyStopping(monitor="val_loss_epoch",
patience=params['train']['patience'],
mode="min")
callback_funcs = [checkpoint_callback, ModelSummary(max_depth=2), early_stop_callback]
else:
callback_funcs = [checkpoint_callback, ModelSummary(max_depth=2)]
trainer = pl.Trainer(devices=gpus, max_epochs=max_epochs,
gradient_clip_val=params['model']['gradient_clip_val'],
gradient_clip_algorithm=params['model']['gradient_clip_algorithm'],
accelerator="gpu",
callbacks=callback_funcs, logger=wandb_logger,
# profiler='simple',
# fast_dev_run=3,
# log_every_n_steps=1,
precision=params['experiment']['precision'],
strategy=strategy
)
return trainer
def to_number(y_hat, nums=None, thres=None):
if nums is None:
nums = torch.tensor([0, 0.6, 3, 7.5, 12.5, 16]).reshape(1, 6, 1, 1, 1).to(y_hat.device)
num_classes = 6
y_hat = F.softmax(y_hat, dim=1)
if thres is not None:
y_sum = 1 - torch.cumsum(y_hat, dim=1)
y_hat = torch.argmax((y_sum < torch.tensor(thres + [2], device=y_sum.device).reshape(1, 6, 1, 1, 1)).long(),
dim=1)
else:
y_hat = torch.argmax(y_hat, dim=1)
y_hat = F.one_hot(y_hat, num_classes=num_classes).permute(0, 4, 1, 2, 3)
ret = torch.sum(y_hat * nums, axis=1, keepdim=True)
return y_hat, ret
def do_predict(trainer, model, predict_params, test_data):
ret = 0
test_batch = trainer.predict(model, dataloaders=test_data)
scores = torch.cat([b[0] for b in test_batch])
_, scores = to_number(scores)
tensor_to_submission_file(scores, predict_params)
return ret
def do_test(trainer, model, test_data):
scores = trainer.test(model, dataloaders=test_data)
def do_val(trainer, model, test_data):
scores = trainer.validate(model, dataloaders=test_data)
def train(params, gpus, mode, checkpoint_path, model=UNetModel, tune=True):
""" main training/evaluation method
"""
# ------------
# model & data
# ------------ | get_cuda_memory_usage(gpus) | 4 | 2023-11-30 06:12:26+00:00 | 12k |
opisaac9001/TTS-With-ooba-and-voice | TTS/tts/layers/delightful_tts/acoustic_model.py | [
{
"identifier": "Conformer",
"path": "TTS/tts/layers/delightful_tts/conformer.py",
"snippet": "class Conformer(nn.Module):\n def __init__(\n self,\n dim: int,\n n_layers: int,\n n_heads: int,\n speaker_embedding_dim: int,\n p_dropout: float,\n kernel_s... | from typing import Callable, Dict, Tuple
from coqpit import Coqpit
from torch import nn
from TTS.tts.layers.delightful_tts.conformer import Conformer
from TTS.tts.layers.delightful_tts.encoders import (
PhonemeLevelProsodyEncoder,
UtteranceLevelProsodyEncoder,
get_mask_from_lengths,
)
from TTS.tts.layers.delightful_tts.energy_adaptor import EnergyAdaptor
from TTS.tts.layers.delightful_tts.networks import EmbeddingPadded, positional_encoding
from TTS.tts.layers.delightful_tts.phoneme_prosody_predictor import PhonemeProsodyPredictor
from TTS.tts.layers.delightful_tts.pitch_adaptor import PitchAdaptor
from TTS.tts.layers.delightful_tts.variance_predictor import VariancePredictor
from TTS.tts.layers.generic.aligner import AlignmentNetwork
from TTS.tts.utils.helpers import generate_path, maximum_path, sequence_mask
import torch
import torch.nn.functional as F | 7,469 | ### credit: https://github.com/dunky11/voicesmith
class AcousticModel(torch.nn.Module):
def __init__(
self,
args: "ModelArgs",
tokenizer: "TTSTokenizer" = None,
speaker_manager: "SpeakerManager" = None,
):
super().__init__()
self.args = args
self.tokenizer = tokenizer
self.speaker_manager = speaker_manager
self.init_multispeaker(args)
# self.set_embedding_dims()
self.length_scale = (
float(self.args.length_scale) if isinstance(self.args.length_scale, int) else self.args.length_scale
)
self.emb_dim = args.n_hidden_conformer_encoder
self.encoder = Conformer(
dim=self.args.n_hidden_conformer_encoder,
n_layers=self.args.n_layers_conformer_encoder,
n_heads=self.args.n_heads_conformer_encoder,
speaker_embedding_dim=self.embedded_speaker_dim,
p_dropout=self.args.dropout_conformer_encoder,
kernel_size_conv_mod=self.args.kernel_size_conv_mod_conformer_encoder,
lrelu_slope=self.args.lrelu_slope,
)
self.pitch_adaptor = PitchAdaptor(
n_input=self.args.n_hidden_conformer_encoder,
n_hidden=self.args.n_hidden_variance_adaptor,
n_out=1,
kernel_size=self.args.kernel_size_variance_adaptor,
emb_kernel_size=self.args.emb_kernel_size_variance_adaptor,
p_dropout=self.args.dropout_variance_adaptor,
lrelu_slope=self.args.lrelu_slope,
)
self.energy_adaptor = EnergyAdaptor(
channels_in=self.args.n_hidden_conformer_encoder,
channels_hidden=self.args.n_hidden_variance_adaptor,
channels_out=1,
kernel_size=self.args.kernel_size_variance_adaptor,
emb_kernel_size=self.args.emb_kernel_size_variance_adaptor,
dropout=self.args.dropout_variance_adaptor,
lrelu_slope=self.args.lrelu_slope,
)
self.aligner = AlignmentNetwork(
in_query_channels=self.args.out_channels,
in_key_channels=self.args.n_hidden_conformer_encoder,
)
self.duration_predictor = VariancePredictor(
channels_in=self.args.n_hidden_conformer_encoder,
channels=self.args.n_hidden_variance_adaptor,
channels_out=1,
kernel_size=self.args.kernel_size_variance_adaptor,
p_dropout=self.args.dropout_variance_adaptor,
lrelu_slope=self.args.lrelu_slope,
)
self.utterance_prosody_encoder = UtteranceLevelProsodyEncoder(
num_mels=self.args.num_mels,
ref_enc_filters=self.args.ref_enc_filters_reference_encoder,
ref_enc_size=self.args.ref_enc_size_reference_encoder,
ref_enc_gru_size=self.args.ref_enc_gru_size_reference_encoder,
ref_enc_strides=self.args.ref_enc_strides_reference_encoder,
n_hidden=self.args.n_hidden_conformer_encoder,
dropout=self.args.dropout_conformer_encoder,
bottleneck_size_u=self.args.bottleneck_size_u_reference_encoder,
token_num=self.args.token_num_reference_encoder,
)
| ### credit: https://github.com/dunky11/voicesmith
class AcousticModel(torch.nn.Module):
def __init__(
self,
args: "ModelArgs",
tokenizer: "TTSTokenizer" = None,
speaker_manager: "SpeakerManager" = None,
):
super().__init__()
self.args = args
self.tokenizer = tokenizer
self.speaker_manager = speaker_manager
self.init_multispeaker(args)
# self.set_embedding_dims()
self.length_scale = (
float(self.args.length_scale) if isinstance(self.args.length_scale, int) else self.args.length_scale
)
self.emb_dim = args.n_hidden_conformer_encoder
self.encoder = Conformer(
dim=self.args.n_hidden_conformer_encoder,
n_layers=self.args.n_layers_conformer_encoder,
n_heads=self.args.n_heads_conformer_encoder,
speaker_embedding_dim=self.embedded_speaker_dim,
p_dropout=self.args.dropout_conformer_encoder,
kernel_size_conv_mod=self.args.kernel_size_conv_mod_conformer_encoder,
lrelu_slope=self.args.lrelu_slope,
)
self.pitch_adaptor = PitchAdaptor(
n_input=self.args.n_hidden_conformer_encoder,
n_hidden=self.args.n_hidden_variance_adaptor,
n_out=1,
kernel_size=self.args.kernel_size_variance_adaptor,
emb_kernel_size=self.args.emb_kernel_size_variance_adaptor,
p_dropout=self.args.dropout_variance_adaptor,
lrelu_slope=self.args.lrelu_slope,
)
self.energy_adaptor = EnergyAdaptor(
channels_in=self.args.n_hidden_conformer_encoder,
channels_hidden=self.args.n_hidden_variance_adaptor,
channels_out=1,
kernel_size=self.args.kernel_size_variance_adaptor,
emb_kernel_size=self.args.emb_kernel_size_variance_adaptor,
dropout=self.args.dropout_variance_adaptor,
lrelu_slope=self.args.lrelu_slope,
)
self.aligner = AlignmentNetwork(
in_query_channels=self.args.out_channels,
in_key_channels=self.args.n_hidden_conformer_encoder,
)
self.duration_predictor = VariancePredictor(
channels_in=self.args.n_hidden_conformer_encoder,
channels=self.args.n_hidden_variance_adaptor,
channels_out=1,
kernel_size=self.args.kernel_size_variance_adaptor,
p_dropout=self.args.dropout_variance_adaptor,
lrelu_slope=self.args.lrelu_slope,
)
self.utterance_prosody_encoder = UtteranceLevelProsodyEncoder(
num_mels=self.args.num_mels,
ref_enc_filters=self.args.ref_enc_filters_reference_encoder,
ref_enc_size=self.args.ref_enc_size_reference_encoder,
ref_enc_gru_size=self.args.ref_enc_gru_size_reference_encoder,
ref_enc_strides=self.args.ref_enc_strides_reference_encoder,
n_hidden=self.args.n_hidden_conformer_encoder,
dropout=self.args.dropout_conformer_encoder,
bottleneck_size_u=self.args.bottleneck_size_u_reference_encoder,
token_num=self.args.token_num_reference_encoder,
)
| self.utterance_prosody_predictor = PhonemeProsodyPredictor( | 7 | 2023-11-29 08:15:06+00:00 | 12k |
magic-research/magic-animate | magicanimate/models/unet.py | [
{
"identifier": "CrossAttnDownBlock3D",
"path": "magicanimate/models/unet_3d_blocks.py",
"snippet": "class CrossAttnDownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_la... | from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.models.modeling_utils import ModelMixin
from diffusers.utils import BaseOutput, logging
from diffusers.models.embeddings import TimestepEmbedding, Timesteps
from .unet_3d_blocks import (
CrossAttnDownBlock3D,
CrossAttnUpBlock3D,
DownBlock3D,
UNetMidBlock3DCrossAttn,
UpBlock3D,
get_down_block,
get_up_block,
)
from .resnet import InflatedConv3d
from diffusers.utils import WEIGHTS_NAME
import os
import json
import pdb
import torch
import torch.nn as nn
import torch.utils.checkpoint | 9,109 | # up
reversed_block_out_channels = list(reversed(block_out_channels))
reversed_attention_head_dim = list(reversed(attention_head_dim))
only_cross_attention = list(reversed(only_cross_attention))
output_channel = reversed_block_out_channels[0]
for i, up_block_type in enumerate(up_block_types):
res = 2 ** (3 - i)
is_final_block = i == len(block_out_channels) - 1
prev_output_channel = output_channel
output_channel = reversed_block_out_channels[i]
input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
# add upsample block for all BUT final layer
if not is_final_block:
add_upsample = True
self.num_upsamplers += 1
else:
add_upsample = False
up_block = get_up_block(
up_block_type,
num_layers=layers_per_block + 1,
in_channels=input_channel,
out_channels=output_channel,
prev_output_channel=prev_output_channel,
temb_channels=time_embed_dim,
add_upsample=add_upsample,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=reversed_attention_head_dim[i],
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_motion_module=use_motion_module and (res in motion_module_resolutions),
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
self.up_blocks.append(up_block)
prev_output_channel = output_channel
# out
self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)
self.conv_act = nn.SiLU()
self.conv_out = InflatedConv3d(block_out_channels[0], out_channels, kernel_size=3, padding=1)
def set_attention_slice(self, slice_size):
r"""
Enable sliced attention computation.
When this option is enabled, the attention module will split the input tensor in slices, to compute attention
in several steps. This is useful to save some memory in exchange for a small speed decrease.
Args:
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
`"max"`, maxium amount of memory will be saved by running only one slice at a time. If a number is
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
must be a multiple of `slice_size`.
"""
sliceable_head_dims = []
def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):
if hasattr(module, "set_attention_slice"):
sliceable_head_dims.append(module.sliceable_head_dim)
for child in module.children():
fn_recursive_retrieve_slicable_dims(child)
# retrieve number of attention layers
for module in self.children():
fn_recursive_retrieve_slicable_dims(module)
num_slicable_layers = len(sliceable_head_dims)
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = [dim // 2 for dim in sliceable_head_dims]
elif slice_size == "max":
# make smallest slice possible
slice_size = num_slicable_layers * [1]
slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
if len(slice_size) != len(sliceable_head_dims):
raise ValueError(
f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
)
for i in range(len(slice_size)):
size = slice_size[i]
dim = sliceable_head_dims[i]
if size is not None and size > dim:
raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
# Recursively walk through all the children.
# Any children which exposes the set_attention_slice method
# gets the message
def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
if hasattr(module, "set_attention_slice"):
module.set_attention_slice(slice_size.pop())
for child in module.children():
fn_recursive_set_attention_slice(child, slice_size)
reversed_slice_size = list(reversed(slice_size))
for module in self.children():
fn_recursive_set_attention_slice(module, reversed_slice_size)
def _set_gradient_checkpointing(self, module, value=False):
| # *************************************************************************
# This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo-
# difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B-
# ytedance Inc..
# *************************************************************************
# Adapted from https://github.com/guoyww/AnimateDiff
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class UNet3DConditionOutput(BaseOutput):
sample: torch.FloatTensor
class UNet3DConditionModel(ModelMixin, ConfigMixin):
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
sample_size: Optional[int] = None,
in_channels: int = 4,
out_channels: int = 4,
center_input_sample: bool = False,
flip_sin_to_cos: bool = True,
freq_shift: int = 0,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"DownBlock3D",
),
mid_block_type: str = "UNetMidBlock3DCrossAttn",
up_block_types: Tuple[str] = (
"UpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D"
),
only_cross_attention: Union[bool, Tuple[bool]] = False,
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
act_fn: str = "silu",
norm_num_groups: int = 32,
norm_eps: float = 1e-5,
cross_attention_dim: int = 1280,
attention_head_dim: Union[int, Tuple[int]] = 8,
dual_cross_attention: bool = False,
use_linear_projection: bool = False,
class_embed_type: Optional[str] = None,
num_class_embeds: Optional[int] = None,
upcast_attention: bool = False,
resnet_time_scale_shift: str = "default",
# Additional
use_motion_module = False,
motion_module_resolutions = ( 1,2,4,8 ),
motion_module_mid_block = False,
motion_module_decoder_only = False,
motion_module_type = None,
motion_module_kwargs = {},
unet_use_cross_frame_attention = None,
unet_use_temporal_attention = None,
):
super().__init__()
self.sample_size = sample_size
time_embed_dim = block_out_channels[0] * 4
# input
self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))
# time
self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
timestep_input_dim = block_out_channels[0]
self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
# class embedding
if class_embed_type is None and num_class_embeds is not None:
self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
elif class_embed_type == "timestep":
self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
elif class_embed_type == "identity":
self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
else:
self.class_embedding = None
self.down_blocks = nn.ModuleList([])
self.mid_block = None
self.up_blocks = nn.ModuleList([])
if isinstance(only_cross_attention, bool):
only_cross_attention = [only_cross_attention] * len(down_block_types)
if isinstance(attention_head_dim, int):
attention_head_dim = (attention_head_dim,) * len(down_block_types)
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
res = 2 ** i
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
down_block = get_down_block(
down_block_type,
num_layers=layers_per_block,
in_channels=input_channel,
out_channels=output_channel,
temb_channels=time_embed_dim,
add_downsample=not is_final_block,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attention_head_dim[i],
downsample_padding=downsample_padding,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_motion_module=use_motion_module and (res in motion_module_resolutions) and (not motion_module_decoder_only),
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
self.down_blocks.append(down_block)
# mid
if mid_block_type == "UNetMidBlock3DCrossAttn":
self.mid_block = UNetMidBlock3DCrossAttn(
in_channels=block_out_channels[-1],
temb_channels=time_embed_dim,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
output_scale_factor=mid_block_scale_factor,
resnet_time_scale_shift=resnet_time_scale_shift,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attention_head_dim[-1],
resnet_groups=norm_num_groups,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
upcast_attention=upcast_attention,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_motion_module=use_motion_module and motion_module_mid_block,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
else:
raise ValueError(f"unknown mid_block_type : {mid_block_type}")
# count how many layers upsample the videos
self.num_upsamplers = 0
# up
reversed_block_out_channels = list(reversed(block_out_channels))
reversed_attention_head_dim = list(reversed(attention_head_dim))
only_cross_attention = list(reversed(only_cross_attention))
output_channel = reversed_block_out_channels[0]
for i, up_block_type in enumerate(up_block_types):
res = 2 ** (3 - i)
is_final_block = i == len(block_out_channels) - 1
prev_output_channel = output_channel
output_channel = reversed_block_out_channels[i]
input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
# add upsample block for all BUT final layer
if not is_final_block:
add_upsample = True
self.num_upsamplers += 1
else:
add_upsample = False
up_block = get_up_block(
up_block_type,
num_layers=layers_per_block + 1,
in_channels=input_channel,
out_channels=output_channel,
prev_output_channel=prev_output_channel,
temb_channels=time_embed_dim,
add_upsample=add_upsample,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=reversed_attention_head_dim[i],
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
unet_use_cross_frame_attention=unet_use_cross_frame_attention,
unet_use_temporal_attention=unet_use_temporal_attention,
use_motion_module=use_motion_module and (res in motion_module_resolutions),
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
self.up_blocks.append(up_block)
prev_output_channel = output_channel
# out
self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)
self.conv_act = nn.SiLU()
self.conv_out = InflatedConv3d(block_out_channels[0], out_channels, kernel_size=3, padding=1)
def set_attention_slice(self, slice_size):
r"""
Enable sliced attention computation.
When this option is enabled, the attention module will split the input tensor in slices, to compute attention
in several steps. This is useful to save some memory in exchange for a small speed decrease.
Args:
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
`"max"`, maxium amount of memory will be saved by running only one slice at a time. If a number is
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
must be a multiple of `slice_size`.
"""
sliceable_head_dims = []
def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):
if hasattr(module, "set_attention_slice"):
sliceable_head_dims.append(module.sliceable_head_dim)
for child in module.children():
fn_recursive_retrieve_slicable_dims(child)
# retrieve number of attention layers
for module in self.children():
fn_recursive_retrieve_slicable_dims(module)
num_slicable_layers = len(sliceable_head_dims)
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = [dim // 2 for dim in sliceable_head_dims]
elif slice_size == "max":
# make smallest slice possible
slice_size = num_slicable_layers * [1]
slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
if len(slice_size) != len(sliceable_head_dims):
raise ValueError(
f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
)
for i in range(len(slice_size)):
size = slice_size[i]
dim = sliceable_head_dims[i]
if size is not None and size > dim:
raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
# Recursively walk through all the children.
# Any children which exposes the set_attention_slice method
# gets the message
def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
if hasattr(module, "set_attention_slice"):
module.set_attention_slice(slice_size.pop())
for child in module.children():
fn_recursive_set_attention_slice(child, slice_size)
reversed_slice_size = list(reversed(slice_size))
for module in self.children():
fn_recursive_set_attention_slice(module, reversed_slice_size)
def _set_gradient_checkpointing(self, module, value=False): | if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)): | 2 | 2023-11-21 08:33:54+00:00 | 12k |
luciddreamer-cvlab/LucidDreamer | scene/dataset_readers.py | [
{
"identifier": "BasicPointCloud",
"path": "scene/gaussian_model.py",
"snippet": "class GaussianModel:\n def setup_functions(self):\n def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation):\n def __init__(self, sh_degree : int):\n def capture(self):\n def rest... | import os
import sys
import json
import imageio
import torch
import numpy as np
from typing import NamedTuple
from pathlib import Path
from PIL import Image
from plyfile import PlyData, PlyElement
from scene.gaussian_model import BasicPointCloud
from scene.cameras import MiniCam, Camera
from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \
read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text
from utils.graphics import getWorld2View2, focal2fov, fov2focal
from utils.graphics import getProjectionMatrix
from utils.trajectory import get_camerapaths
from utils.sh import SH2RGB | 8,039 |
cam_infos.append(CameraInfo(uid=idx, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
image_path='None', image_name='None', width=image.size[1], height=image.size[0]))
return cam_infos
def readNerfSyntheticInfo(path, white_background, eval, preset=None, extension=".png"):
print("Reading Training Transforms")
train_cam_infos = readCamerasFromTransforms(path, "transforms_train.json", white_background, extension)
print("Reading Test Transforms")
test_cam_infos = readCamerasFromTransforms(path, "transforms_test.json", white_background, extension)
if preset:
preset_cam_infos = readCamerasFromPreset('/home/chung/workspace/gaussian-splatting/poses_supplementary', f"{preset}.json")
else:
preset_cam_infos = None
if not eval:
train_cam_infos.extend(test_cam_infos)
test_cam_infos = []
nerf_normalization = getNerfppNorm(train_cam_infos)
ply_path = os.path.join(path, "points3d.ply")
if not os.path.exists(ply_path):
# Since this data set has no colmap data, we start with random points
num_pts = 100_000
print(f"Generating random point cloud ({num_pts})...")
# We create random points inside the bounds of the synthetic Blender scenes
xyz = np.random.random((num_pts, 3)) * 2.6 - 1.3
shs = np.random.random((num_pts, 3)) / 255.0
pcd = BasicPointCloud(points=xyz, colors=SH2RGB(shs), normals=np.zeros((num_pts, 3)))
storePly(ply_path, xyz, SH2RGB(shs) * 255)
try:
pcd = fetchPly(ply_path)
except:
pcd = None
scene_info = SceneInfo(point_cloud=pcd,
train_cameras=train_cam_infos,
test_cameras=test_cam_infos,
preset_cameras=preset_cam_infos,
nerf_normalization=nerf_normalization,
ply_path=ply_path)
return scene_info
def loadCamerasFromData(traindata, white_background):
cameras = []
fovx = traindata["camera_angle_x"]
frames = traindata["frames"]
for idx, frame in enumerate(frames):
# NeRF 'transform_matrix' is a camera-to-world transform
c2w = np.array(frame["transform_matrix"])
# change from OpenGL/Blender camera axes (Y up, Z back) to COLMAP (Y down, Z forward)
c2w[:3, 1:3] *= -1
# get the world-to-camera transform and set R, T
w2c = np.linalg.inv(c2w)
R = np.transpose(w2c[:3,:3]) # R is stored transposed due to 'glm' in CUDA code
T = w2c[:3, 3]
image = frame["image"] if "image" in frame else None
im_data = np.array(image.convert("RGBA"))
bg = np.array([1,1,1]) if white_background else np.array([0, 0, 0])
norm_data = im_data / 255.0
arr = norm_data[:,:,:3] * norm_data[:, :, 3:4] + bg * (1 - norm_data[:, :, 3:4])
image = Image.fromarray(np.array(arr*255.0, dtype=np.byte), "RGB")
loaded_mask = np.ones_like(norm_data[:, :, 3:4])
fovy = focal2fov(fov2focal(fovx, image.size[1]), image.size[0])
FovY = fovy
FovX = fovx
image = torch.Tensor(arr).permute(2,0,1)
loaded_mask = None #torch.Tensor(loaded_mask).permute(2,0,1)
### torch로 바꿔야함
cameras.append(Camera(colmap_id=idx, R=R, T=T, FoVx=FovX, FoVy=FovY, image=image,
gt_alpha_mask=loaded_mask, image_name='', uid=idx, data_device='cuda'))
return cameras
def loadCameraPreset(traindata, presetdata):
cam_infos = {}
## camera setting (for H, W and focal)
fovx = traindata["camera_angle_x"] * 1.2
W, H = traindata["frames"][0]["image"].size
# W, H = traindata["W"], traindata["H"]
for camkey in presetdata:
cam_infos[camkey] = []
for idx, frame in enumerate(presetdata[camkey]["frames"]):
# NeRF 'transform_matrix' is a camera-to-world transform
c2w = np.array(frame["transform_matrix"])
# change from OpenGL/Blender camera axes (Y up, Z back) to COLMAP (Y down, Z forward)
c2w[:3, 1:3] *= -1
# get the world-to-camera transform and set R, T
w2c = np.linalg.inv(c2w)
R = np.transpose(w2c[:3,:3]) # R is stored transposed due to 'glm' in CUDA code
T = w2c[:3, 3]
fovy = focal2fov(fov2focal(fovx, W), H)
FovY = fovy
FovX = fovx
znear, zfar = 0.01, 100
world_view_transform = torch.tensor(getWorld2View2(R, T, np.array([0.0, 0.0, 0.0]), 1.0)).transpose(0, 1).cuda()
projection_matrix = getProjectionMatrix(znear=znear, zfar=zfar, fovX=FovX, fovY=FovY).transpose(0,1).cuda()
full_proj_transform = (world_view_transform.unsqueeze(0).bmm(projection_matrix.unsqueeze(0))).squeeze(0)
| #
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact george.drettakis@inria.fr
#
class CameraInfo(NamedTuple):
uid: int
R: np.array
T: np.array
FovY: np.array
FovX: np.array
image: np.array
image_path: str
image_name: str
width: int
height: int
class SceneInfo(NamedTuple):
point_cloud: BasicPointCloud
train_cameras: list
test_cameras: list
preset_cameras: list
nerf_normalization: dict
ply_path: str
def getNerfppNorm(cam_info):
def get_center_and_diag(cam_centers):
cam_centers = np.hstack(cam_centers)
avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True)
center = avg_cam_center
dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True)
diagonal = np.max(dist)
return center.flatten(), diagonal
cam_centers = []
for cam in cam_info:
W2C = getWorld2View2(cam.R, cam.T)
C2W = np.linalg.inv(W2C)
cam_centers.append(C2W[:3, 3:4])
center, diagonal = get_center_and_diag(cam_centers)
radius = diagonal * 1.1
translate = -center
return {"translate": translate, "radius": radius}
def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder):
cam_infos = []
for idx, key in enumerate(cam_extrinsics):
sys.stdout.write('\r')
# the exact output you're looking for:
sys.stdout.write("Reading camera {}/{}".format(idx+1, len(cam_extrinsics)))
sys.stdout.flush()
extr = cam_extrinsics[key]
intr = cam_intrinsics[extr.camera_id]
height = intr.height
width = intr.width
uid = intr.id
R = np.transpose(qvec2rotmat(extr.qvec))
T = np.array(extr.tvec)
if intr.model=="SIMPLE_PINHOLE":
focal_length_x = intr.params[0]
FovY = focal2fov(focal_length_x, height)
FovX = focal2fov(focal_length_x, width)
elif intr.model=="PINHOLE":
focal_length_x = intr.params[0]
focal_length_y = intr.params[1]
FovY = focal2fov(focal_length_y, height)
FovX = focal2fov(focal_length_x, width)
else:
assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!"
image_path = os.path.join(images_folder, os.path.basename(extr.name))
image_name = os.path.basename(image_path).split(".")[0]
image = Image.open(image_path)
cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
image_path=image_path, image_name=image_name, width=width, height=height)
cam_infos.append(cam_info)
sys.stdout.write('\n')
return cam_infos
def fetchPly(path):
plydata = PlyData.read(path)
vertices = plydata['vertex']
idx = np.random.choice(len(vertices['x']),size=(min(len(vertices['x']), 100_000),),replace=False)
positions = np.vstack([vertices['x'][idx], vertices['y'][idx], vertices['z'][idx]]).T if 'x' in vertices else None
colors = np.vstack([vertices['red'][idx], vertices['green'][idx], vertices['blue'][idx]]).T / 255.0 if 'red' in vertices else None
normals = np.vstack([vertices['nx'][idx], vertices['ny'][idx], vertices['nz'][idx]]).T if 'nx' in vertices else None
return BasicPointCloud(points=positions, colors=colors, normals=normals)
def storePly(path, xyz, rgb):
# Define the dtype for the structured array
dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'),
('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
normals = np.zeros_like(xyz)
elements = np.empty(xyz.shape[0], dtype=dtype)
attributes = np.concatenate((xyz, normals, rgb), axis=1)
elements[:] = list(map(tuple, attributes))
# Create the PlyData object and write to file
vertex_element = PlyElement.describe(elements, 'vertex')
ply_data = PlyData([vertex_element])
ply_data.write(path)
def readColmapSceneInfo(path, images, eval, preset=None, llffhold=8):
try:
cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.bin")
cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.bin")
cam_extrinsics = read_extrinsics_binary(cameras_extrinsic_file)
cam_intrinsics = read_intrinsics_binary(cameras_intrinsic_file)
except:
cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.txt")
cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.txt")
cam_extrinsics = read_extrinsics_text(cameras_extrinsic_file)
cam_intrinsics = read_intrinsics_text(cameras_intrinsic_file)
reading_dir = "images" if images == None else images
cam_infos_unsorted = readColmapCameras(cam_extrinsics=cam_extrinsics, cam_intrinsics=cam_intrinsics, images_folder=os.path.join(path, reading_dir))
cam_infos = sorted(cam_infos_unsorted.copy(), key = lambda x : x.image_name)
if eval:
# train_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold != 0]
# test_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold == 0]
train_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % 5 == 2 or idx % 5 == 0]
test_cam_infos = [c for idx, c in enumerate(cam_infos) if not (idx % 5 == 2 or idx % 5 == 0)]
else:
train_cam_infos = cam_infos
test_cam_infos = []
nerf_normalization = getNerfppNorm(train_cam_infos)
ply_path = os.path.join(path, "sparse/0/points3D.ply")
bin_path = os.path.join(path, "sparse/0/points3D.bin")
txt_path = os.path.join(path, "sparse/0/points3D.txt")
if not os.path.exists(ply_path):
print("Converting point3d.bin to .ply, will happen only the first time you open the scene.")
try:
xyz, rgb, _ = read_points3D_binary(bin_path)
except:
xyz, rgb, _ = read_points3D_text(txt_path)
storePly(ply_path, xyz, rgb)
try:
pcd = fetchPly(ply_path)
except:
pcd = None
if preset:
preset_cam_infos = readCamerasFromPreset('/home/chung/workspace/gaussian-splatting/poses_supplementary', f"{preset}.json")
else:
preset_cam_infos = None
scene_info = SceneInfo(point_cloud=pcd,
train_cameras=train_cam_infos,
test_cameras=test_cam_infos,
preset_cameras=preset_cam_infos,
nerf_normalization=nerf_normalization,
ply_path=ply_path)
return scene_info
def readCamerasFromTransforms(path, transformsfile, white_background, extension=".png"):
cam_infos = []
with open(os.path.join(path, transformsfile)) as json_file:
contents = json.load(json_file)
fovx = contents["camera_angle_x"]
frames = contents["frames"]
for idx, frame in enumerate(frames):
cam_name = os.path.join(path, frame["file_path"] + extension)
# NeRF 'transform_matrix' is a camera-to-world transform
c2w = np.array(frame["transform_matrix"])
# change from OpenGL/Blender camera axes (Y up, Z back) to COLMAP (Y down, Z forward)
c2w[:3, 1:3] *= -1
# get the world-to-camera transform and set R, T
w2c = np.linalg.inv(c2w)
R = np.transpose(w2c[:3,:3]) # R is stored transposed due to 'glm' in CUDA code
T = w2c[:3, 3]
image_path = os.path.join(path, cam_name)
image_name = Path(cam_name).stem
image = Image.open(image_path)
# if os.path.exists(os.path.join(path, frame["file_path"].replace("/train/", "/depths_train/")+'.npy')):
# depth = np.load(os.path.join(path, frame["file_path"].replace("/train/", "/depths_train/")+'.npy'))
# if os.path.exists(os.path.join(path, frame["file_path"].replace("/train/", "/masks_train/")+'.png')):
# mask = imageio.v3.imread(os.path.join(path, frame["file_path"].replace("/train/", "/masks_train/")+'.png'))[:,:,0]/255.
# else:
# mask = np.ones_like(depth)
# final_depth = depth*mask
# else:
# final_depth = None
im_data = np.array(image.convert("RGBA"))
bg = np.array([1,1,1]) if white_background else np.array([0, 0, 0])
norm_data = im_data / 255.0
arr = norm_data[:,:,:3] * norm_data[:, :, 3:4] + bg * (1 - norm_data[:, :, 3:4])
image = Image.fromarray(np.array(arr*255.0, dtype=np.byte), "RGB")
fovy = focal2fov(fov2focal(fovx, image.size[1]), image.size[0])
FovY = fovy
FovX = fovx
cam_infos.append(CameraInfo(uid=idx, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
image_path=image_path, image_name=image_name, width=image.size[0], height=image.size[1]))
return cam_infos
def readCamerasFromPreset(path, transformsfile):
cam_infos = []
with open(os.path.join(path, transformsfile)) as json_file:
contents = json.load(json_file)
FOV = contents["camera_angle_x"]*1.2
frames = contents["frames"]
for idx, frame in enumerate(frames):
# NeRF 'transform_matrix' is a camera-to-world transform
c2w = np.array(frame["transform_matrix"])
# change from OpenGL/Blender camera axes (Y up, Z back) to COLMAP (Y down, Z forward)
c2w[:3, 1:3] *= -1
# get the world-to-camera transform and set R, T
w2c = np.linalg.inv(np.concatenate((c2w, np.array([0,0,0,1]).reshape(1,4)), axis=0))
R = np.transpose(w2c[:3,:3]) # R is stored transposed due to 'glm' in CUDA code
T = w2c[:3, 3]
# R = c2w[:3,:3]
# T = - np.transpose(R).dot(c2w[:3,3])
image = Image.fromarray(np.zeros((512,512)), "RGB")
FovY = focal2fov(fov2focal(FOV, 512), image.size[0])
FovX = focal2fov(fov2focal(FOV, 512), image.size[1])
# FovX, FovY = contents["camera_angle_x"], contents["camera_angle_x"]
cam_infos.append(CameraInfo(uid=idx, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
image_path='None', image_name='None', width=image.size[1], height=image.size[0]))
return cam_infos
def readNerfSyntheticInfo(path, white_background, eval, preset=None, extension=".png"):
print("Reading Training Transforms")
train_cam_infos = readCamerasFromTransforms(path, "transforms_train.json", white_background, extension)
print("Reading Test Transforms")
test_cam_infos = readCamerasFromTransforms(path, "transforms_test.json", white_background, extension)
if preset:
preset_cam_infos = readCamerasFromPreset('/home/chung/workspace/gaussian-splatting/poses_supplementary', f"{preset}.json")
else:
preset_cam_infos = None
if not eval:
train_cam_infos.extend(test_cam_infos)
test_cam_infos = []
nerf_normalization = getNerfppNorm(train_cam_infos)
ply_path = os.path.join(path, "points3d.ply")
if not os.path.exists(ply_path):
# Since this data set has no colmap data, we start with random points
num_pts = 100_000
print(f"Generating random point cloud ({num_pts})...")
# We create random points inside the bounds of the synthetic Blender scenes
xyz = np.random.random((num_pts, 3)) * 2.6 - 1.3
shs = np.random.random((num_pts, 3)) / 255.0
pcd = BasicPointCloud(points=xyz, colors=SH2RGB(shs), normals=np.zeros((num_pts, 3)))
storePly(ply_path, xyz, SH2RGB(shs) * 255)
try:
pcd = fetchPly(ply_path)
except:
pcd = None
scene_info = SceneInfo(point_cloud=pcd,
train_cameras=train_cam_infos,
test_cameras=test_cam_infos,
preset_cameras=preset_cam_infos,
nerf_normalization=nerf_normalization,
ply_path=ply_path)
return scene_info
def loadCamerasFromData(traindata, white_background):
cameras = []
fovx = traindata["camera_angle_x"]
frames = traindata["frames"]
for idx, frame in enumerate(frames):
# NeRF 'transform_matrix' is a camera-to-world transform
c2w = np.array(frame["transform_matrix"])
# change from OpenGL/Blender camera axes (Y up, Z back) to COLMAP (Y down, Z forward)
c2w[:3, 1:3] *= -1
# get the world-to-camera transform and set R, T
w2c = np.linalg.inv(c2w)
R = np.transpose(w2c[:3,:3]) # R is stored transposed due to 'glm' in CUDA code
T = w2c[:3, 3]
image = frame["image"] if "image" in frame else None
im_data = np.array(image.convert("RGBA"))
bg = np.array([1,1,1]) if white_background else np.array([0, 0, 0])
norm_data = im_data / 255.0
arr = norm_data[:,:,:3] * norm_data[:, :, 3:4] + bg * (1 - norm_data[:, :, 3:4])
image = Image.fromarray(np.array(arr*255.0, dtype=np.byte), "RGB")
loaded_mask = np.ones_like(norm_data[:, :, 3:4])
fovy = focal2fov(fov2focal(fovx, image.size[1]), image.size[0])
FovY = fovy
FovX = fovx
image = torch.Tensor(arr).permute(2,0,1)
loaded_mask = None #torch.Tensor(loaded_mask).permute(2,0,1)
### torch로 바꿔야함
cameras.append(Camera(colmap_id=idx, R=R, T=T, FoVx=FovX, FoVy=FovY, image=image,
gt_alpha_mask=loaded_mask, image_name='', uid=idx, data_device='cuda'))
return cameras
def loadCameraPreset(traindata, presetdata):
cam_infos = {}
## camera setting (for H, W and focal)
fovx = traindata["camera_angle_x"] * 1.2
W, H = traindata["frames"][0]["image"].size
# W, H = traindata["W"], traindata["H"]
for camkey in presetdata:
cam_infos[camkey] = []
for idx, frame in enumerate(presetdata[camkey]["frames"]):
# NeRF 'transform_matrix' is a camera-to-world transform
c2w = np.array(frame["transform_matrix"])
# change from OpenGL/Blender camera axes (Y up, Z back) to COLMAP (Y down, Z forward)
c2w[:3, 1:3] *= -1
# get the world-to-camera transform and set R, T
w2c = np.linalg.inv(c2w)
R = np.transpose(w2c[:3,:3]) # R is stored transposed due to 'glm' in CUDA code
T = w2c[:3, 3]
fovy = focal2fov(fov2focal(fovx, W), H)
FovY = fovy
FovX = fovx
znear, zfar = 0.01, 100
world_view_transform = torch.tensor(getWorld2View2(R, T, np.array([0.0, 0.0, 0.0]), 1.0)).transpose(0, 1).cuda()
projection_matrix = getProjectionMatrix(znear=znear, zfar=zfar, fovX=FovX, fovY=FovY).transpose(0,1).cuda()
full_proj_transform = (world_view_transform.unsqueeze(0).bmm(projection_matrix.unsqueeze(0))).squeeze(0)
| cam_infos[camkey].append(MiniCam(width=W, height=H, fovy=FovY, fovx=FovX, znear=znear, zfar=zfar, | 1 | 2023-11-22 06:54:32+00:00 | 12k |
AILab-CVC/UniRepLKNet | Video/run_class_finetuning.py | [
{
"identifier": "LayerDecayValueAssigner",
"path": "optim_factory.py",
"snippet": "class LayerDecayValueAssigner(object):\n def __init__(self, values):\n self.values = values\n\n def get_scale(self, layer_id):\n return self.values[layer_id]\n\n def get_layer_id(self, var_name):\n ... | import argparse
import datetime
import json
import os
import random
import time
import deepspeed
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import models # noqa: F401
import utils
from collections import OrderedDict
from functools import partial
from pathlib import Path
from timm.data.mixup import Mixup
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.models import create_model
from timm.utils import ModelEma
from dataset import build_dataset
from engine_for_finetuning import (
final_test,
merge,
train_one_epoch,
validation_one_epoch,
)
from optim_factory import (
LayerDecayValueAssigner,
create_optimizer,
get_parameter_groups,
)
from utils import NativeScalerWithGradNormCount as NativeScaler
from utils import multiple_samples_collate
from unireplknet import UniRepLKNet | 7,622 | 0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens,
size=(new_size, new_size),
mode='bicubic',
align_corners=False)
# BT, C, H, W -> BT, H, W, C -> B, T, H, W, C
pos_tokens = pos_tokens.permute(0, 2, 3, 1).reshape(
-1, args.num_frames // model.patch_embed.tubelet_size,
new_size, new_size, embedding_size)
pos_tokens = pos_tokens.flatten(1, 3) # B, L, C
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
checkpoint_model['pos_embed'] = new_pos_embed
elif args.input_size != 224:
pos_tokens = model.pos_embed
org_num_frames = 16
T = org_num_frames // args.tubelet_size
P = int((pos_tokens.shape[1] // T)**0.5)
C = pos_tokens.shape[2]
new_P = args.input_size // patch_size[0]
# B, L, C -> BT, H, W, C -> BT, C, H, W
pos_tokens = pos_tokens.reshape(-1, T, P, P, C)
pos_tokens = pos_tokens.reshape(-1, P, P, C).permute(0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens,
size=(new_P, new_P),
mode='bicubic',
align_corners=False)
# BT, C, H, W -> BT, H, W, C -> B, T, H, W, C
pos_tokens = pos_tokens.permute(0, 2, 3,
1).reshape(-1, T, new_P, new_P, C)
pos_tokens = pos_tokens.flatten(1, 3) # B, L, C
model.pos_embed = pos_tokens # update
if args.num_frames != 16:
org_num_frames = 16
T = org_num_frames // args.tubelet_size
pos_tokens = model.pos_embed
new_T = args.num_frames // args.tubelet_size
P = int((pos_tokens.shape[1] // T)**0.5)
C = pos_tokens.shape[2]
pos_tokens = pos_tokens.reshape(-1, T, P, P, C)
pos_tokens = pos_tokens.permute(0, 2, 3, 4,
1).reshape(-1, C, T) # BHW,C,T
pos_tokens = torch.nn.functional.interpolate(
pos_tokens, size=new_T, mode='linear')
pos_tokens = pos_tokens.reshape(1, P, P, C,
new_T).permute(0, 4, 1, 2, 3)
pos_tokens = pos_tokens.flatten(1, 3)
model.pos_embed = pos_tokens # update
utils.load_state_dict(
model, checkpoint_model, prefix=args.model_prefix)
default_kernel_sizes = [[3, 3, 3], [13, 13, 13], [13, 3, 3, 13, 3, 3, 13, 3, 3, 13, 3, 3, 13, 3, 3, 13, 3, 3, 13, 3, 3, 13, 3, 3, 13, 3, 3], [11, 11, 11]]
model = UniRepLKNet(num_classes=400, depths=[3, 3, 27, 3], dims=[128,256,512,1024], drop_path_rate=0.4,
kernel_sizes=default_kernel_sizes,
custom_set='nolk', disable_iGEMM=True)
model.to(device)
model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
model_ema = ModelEma(
model,
decay=args.model_ema_decay,
device='cpu' if args.model_ema_force_cpu else '',
resume='')
print("Using EMA with decay = %.8f" % args.model_ema_decay)
model_without_ddp = model
n_parameters = sum(p.numel() for p in model.parameters()
if p.requires_grad)
print("Model = %s" % str(model_without_ddp))
print('number of params:', n_parameters)
total_batch_size = args.batch_size * args.update_freq * num_tasks
num_training_steps_per_epoch = len(dataset_train) // total_batch_size
args.lr = args.lr * total_batch_size / 256
#########scale the lr#############
args.min_lr = args.min_lr * total_batch_size / 256
args.warmup_lr = args.warmup_lr * total_batch_size / 256
#########scale the lr#############
print("LR = %.8f" % args.lr)
print("Batch size = %d" % total_batch_size)
print("Update frequent = %d" % args.update_freq)
print("Number of training examples = %d" % len(dataset_train))
print("Number of training training per epoch = %d" %
num_training_steps_per_epoch)
# num_layers = model_without_ddp.get_num_layers()
# num_layers = 13
# if args.layer_decay < 1.0:
# assigner = LayerDecayValueAssigner(
# list(args.layer_decay**(num_layers + 1 - i)
# for i in range(num_layers + 2)))
# else:
# assigner = None
# if assigner is not None:
# print("Assigned values = %s" % str(assigner.values))
# if args.layer_decay < 1.0 or args.layer_decay > 1.0:
# num_layers = 12
# # set lower learning rate for lower-level layers.
# # follow the implementation in the code of ConvNeXt and BeiT
# assigner = RepLKNetLayerDecayValueAssigner(list(args.layer_decay ** (num_layers + 1 - i) for i in range(num_layers + 2)))
# else:
assigner = None
if assigner is None and (args.layer_decay < 1.0 or args.layer_decay > 1.0):
num_layers = 12 # convnext layers divided into 12 parts, each with a different decayed lr value.
assigner = LayerDecayValueAssigner(list(args.layer_decay ** (num_layers + 1 - i) for i in range(num_layers + 2)))
else:
assigner = None
if assigner is not None:
print("Assigned values = %s" % str(assigner.values))
| # --------------------------------------------------------
# Based on BEiT, timm, DINO and DeiT code bases
# https://github.com/microsoft/unilm/tree/master/beit
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
# NOTE: Do not comment `import models`, it is used to register models
def get_args():
parser = argparse.ArgumentParser(
'VideoMAE fine-tuning and evaluation script for action classification',
add_help=False)
parser.add_argument('--batch_size', default=64, type=int)
parser.add_argument('--epochs', default=30, type=int)
parser.add_argument('--update_freq', default=1, type=int)
parser.add_argument('--save_ckpt_freq', default=100, type=int)
# Model parameters
parser.add_argument(
'--model',
default='vit_base_patch16_224',
type=str,
metavar='MODEL',
help='Name of model to train')
parser.add_argument('--tubelet_size', type=int, default=2)
parser.add_argument(
'--input_size', default=224, type=int, help='images input size')
parser.add_argument(
'--with_checkpoint', action='store_true', default=False)
parser.add_argument(
'--drop',
type=float,
default=0.0,
metavar='PCT',
help='Dropout rate (default: 0.)')
parser.add_argument(
'--attn_drop_rate',
type=float,
default=0.0,
metavar='PCT',
help='Attention dropout rate (default: 0.)')
parser.add_argument(
'--drop_path',
type=float,
default=0.1,
metavar='PCT',
help='Drop path rate (default: 0.1)')
parser.add_argument(
'--head_drop_rate',
type=float,
default=0.0,
metavar='PCT',
help='cls head dropout rate (default: 0.)')
parser.add_argument(
'--disable_eval_during_finetuning', action='store_true', default=False)
parser.add_argument('--model_ema', action='store_true', default=False)
parser.add_argument(
'--model_ema_decay', type=float, default=0.9999, help='')
parser.add_argument(
'--model_ema_force_cpu', action='store_true', default=False, help='')
# Optimizer parameters
parser.add_argument(
'--opt',
default='adamw',
type=str,
metavar='OPTIMIZER',
help='Optimizer (default: "adamw"')
parser.add_argument(
'--opt_eps',
default=1e-8,
type=float,
metavar='EPSILON',
help='Optimizer Epsilon (default: 1e-8)')
parser.add_argument(
'--opt_betas',
default=None,
type=float,
nargs='+',
metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
parser.add_argument(
'--clip_grad',
type=float,
default=None,
metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument(
'--momentum',
type=float,
default=0.9,
metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument(
'--weight_decay',
type=float,
default=0.05,
help='weight decay (default: 0.05)')
parser.add_argument(
'--weight_decay_end',
type=float,
default=None,
help="""Final value of the
weight decay. We use a cosine schedule for WD and using a larger decay by
the end of training improves performance for ViTs.""")
parser.add_argument(
'--lr',
type=float,
default=1e-3,
metavar='LR',
help='learning rate (default: 1e-3)')
parser.add_argument('--layer_decay', type=float, default=0.75)
parser.add_argument(
'--warmup_lr',
type=float,
default=1e-8,
metavar='LR',
help='warmup learning rate (default: 1e-6)')
parser.add_argument(
'--min_lr',
type=float,
default=1e-6,
metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
parser.add_argument(
'--warmup_epochs',
type=int,
default=5,
metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument(
'--warmup_steps',
type=int,
default=-1,
metavar='N',
help='num of steps to warmup LR, will overload warmup_epochs if set > 0'
)
# Augmentation parameters
parser.add_argument(
'--color_jitter',
type=float,
default=0.4,
metavar='PCT',
help='Color jitter factor (default: 0.4)')
parser.add_argument(
'--num_sample', type=int, default=2, help='Repeated_aug (default: 2)')
parser.add_argument(
'--aa',
type=str,
default='rand-m7-n4-mstd0.5-inc1',
metavar='NAME',
help=
'Use AutoAugment policy. "v0" or "original". " + "(default: rand-m7-n4-mstd0.5-inc1)'
),
parser.add_argument(
'--smoothing',
type=float,
default=0.1,
help='Label smoothing (default: 0.1)')
parser.add_argument(
'--train_interpolation',
type=str,
default='bicubic',
help=
'Training interpolation (random, bilinear, bicubic default: "bicubic")'
)
# Evaluation parameters
parser.add_argument('--crop_pct', type=float, default=None)
parser.add_argument('--short_side_size', type=int, default=224)
parser.add_argument('--test_num_segment', type=int, default=10)
parser.add_argument('--test_num_crop', type=int, default=3)
# * Random Erase params
parser.add_argument(
'--reprob',
type=float,
default=0.25,
metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument(
'--remode',
type=str,
default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument(
'--recount',
type=int,
default=1,
help='Random erase count (default: 1)')
parser.add_argument(
'--resplit',
action='store_true',
default=False,
help='Do not random erase first (clean) augmentation split')
# * Mixup params
parser.add_argument(
'--mixup',
type=float,
default=0.8,
help='mixup alpha, mixup enabled if > 0.')
parser.add_argument(
'--cutmix',
type=float,
default=1.0,
help='cutmix alpha, cutmix enabled if > 0.')
parser.add_argument(
'--cutmix_minmax',
type=float,
nargs='+',
default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set')
parser.add_argument(
'--mixup_prob',
type=float,
default=1.0,
help=
'Probability of performing mixup or cutmix when either/both is enabled'
)
parser.add_argument(
'--mixup_switch_prob',
type=float,
default=0.5,
help=
'Probability of switching to cutmix when both mixup and cutmix enabled'
)
parser.add_argument(
'--mixup_mode',
type=str,
default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"'
)
# * Finetuning params
parser.add_argument(
'--finetune', default='', help='finetune from checkpoint')
parser.add_argument('--model_key', default='model|module', type=str)
parser.add_argument('--model_prefix', default='', type=str)
parser.add_argument('--init_scale', default=0.001, type=float)
parser.add_argument('--use_mean_pooling', action='store_true')
parser.set_defaults(use_mean_pooling=True)
parser.add_argument(
'--use_cls', action='store_false', dest='use_mean_pooling')
# Dataset parameters
parser.add_argument(
'--data_path',
default='/your/data/path/',
type=str,
help='dataset path')
parser.add_argument(
'--data_root', default='', type=str, help='dataset path root')
parser.add_argument(
'--eval_data_path',
default=None,
type=str,
help='dataset path for evaluation')
parser.add_argument(
'--nb_classes',
default=400,
type=int,
help='number of the classification types')
parser.add_argument(
'--imagenet_default_mean_and_std', default=True, action='store_true')
parser.add_argument('--num_segments', type=int, default=1)
parser.add_argument('--num_frames', type=int, default=16)
parser.add_argument('--sampling_rate', type=int, default=4)
parser.add_argument('--sparse_sample', default=False, action='store_true')
parser.add_argument(
'--data_set',
default='Kinetics-400',
choices=[
'Kinetics-400', 'Kinetics-600', 'Kinetics-700', 'SSV2', 'UCF101',
'HMDB51', 'Diving48', 'Kinetics-710', 'MIT'
],
type=str,
help='dataset')
parser.add_argument(
'--fname_tmpl',
default='img_{:05}.jpg',
type=str,
help='filename_tmpl for rawframe dataset')
parser.add_argument(
'--start_idx',
default=1,
type=int,
help='start_idx for rwaframe dataset')
parser.add_argument(
'--output_dir',
default='',
help='path where to save, empty for no saving')
parser.add_argument(
'--log_dir', default=None, help='path where to tensorboard log')
parser.add_argument(
'--device',
default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default='', help='resume from checkpoint')
parser.add_argument('--auto_resume', action='store_true')
parser.add_argument(
'--no_auto_resume', action='store_false', dest='auto_resume')
parser.set_defaults(auto_resume=True)
parser.add_argument('--save_ckpt', action='store_true')
parser.add_argument(
'--no_save_ckpt', action='store_false', dest='save_ckpt')
parser.set_defaults(save_ckpt=True)
parser.add_argument(
'--start_epoch', default=0, type=int, metavar='N', help='start epoch')
parser.add_argument(
'--eval', action='store_true', help='Perform evaluation only')
parser.add_argument(
'--validation', action='store_true', help='Perform validation only')
parser.add_argument(
'--dist_eval',
action='store_true',
default=False,
help='Enabling distributed evaluation')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument(
'--pin_mem',
action='store_true',
help=
'Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.'
)
parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem')
parser.set_defaults(pin_mem=True)
# distributed training parameters
parser.add_argument(
'--world_size',
default=1,
type=int,
help='number of distributed processes')
parser.add_argument('--local_rank', default=-1, type=int)
parser.add_argument('--dist_on_itp', action='store_true')
parser.add_argument(
'--dist_url',
default='env://',
help='url used to set up distributed training')
parser.add_argument(
'--enable_deepspeed', action='store_true', default=False)
known_args, _ = parser.parse_known_args()
if known_args.enable_deepspeed:
parser = deepspeed.add_config_arguments(parser)
ds_init = deepspeed.initialize
else:
ds_init = None
return parser.parse_args(), ds_init
def main(args, ds_init):
utils.init_distributed_mode(args)
if ds_init is not None:
utils.create_ds_config(args)
print(args)
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
cudnn.benchmark = True
dataset_train, args.nb_classes = build_dataset(
is_train=True, test_mode=False, args=args)
if args.disable_eval_during_finetuning:
dataset_val = None
else:
dataset_val, _ = build_dataset(
is_train=False, test_mode=False, args=args)
dataset_test, _ = build_dataset(is_train=False, test_mode=True, args=args)
num_tasks = utils.get_world_size()
global_rank = utils.get_rank()
sampler_train = torch.utils.data.DistributedSampler(
dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True)
print("Sampler_train = %s" % str(sampler_train))
if args.dist_eval:
if len(dataset_val) % num_tasks != 0:
print(
'Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
'This will slightly alter validation results as extra duplicate entries are added to achieve '
'equal num of samples per-process.')
sampler_val = torch.utils.data.DistributedSampler(
dataset_val,
num_replicas=num_tasks,
rank=global_rank,
shuffle=False)
sampler_test = torch.utils.data.DistributedSampler(
dataset_test,
num_replicas=num_tasks,
rank=global_rank,
shuffle=False)
else:
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
if global_rank == 0 and args.log_dir is not None:
os.makedirs(args.log_dir, exist_ok=True)
log_writer = utils.TensorboardLogger(log_dir=args.log_dir)
else:
log_writer = None
if args.num_sample > 1:
collate_func = partial(multiple_samples_collate, fold=False)
else:
collate_func = None
data_loader_train = torch.utils.data.DataLoader(
dataset_train,
sampler=sampler_train,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=True,
collate_fn=collate_func,
persistent_workers=True)
if dataset_val is not None:
data_loader_val = torch.utils.data.DataLoader(
dataset_val,
sampler=sampler_val,
# batch_size=int(1.5 * args.batch_size),
batch_size=int(args.batch_size),
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=False,
persistent_workers=True)
else:
data_loader_val = None
if dataset_test is not None:
data_loader_test = torch.utils.data.DataLoader(
dataset_test,
sampler=sampler_test,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=False,
persistent_workers=True)
else:
data_loader_test = None
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
print("Mixup is activated!")
mixup_fn = Mixup(
mixup_alpha=args.mixup,
cutmix_alpha=args.cutmix,
cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob,
switch_prob=args.mixup_switch_prob,
mode=args.mixup_mode,
label_smoothing=args.smoothing,
num_classes=args.nb_classes)
model = create_model(
args.model,
img_size=args.input_size,
pretrained=False,
num_classes=args.nb_classes,
all_frames=args.num_frames * args.num_segments,
tubelet_size=args.tubelet_size,
drop_rate=args.drop,
drop_path_rate=args.drop_path,
attn_drop_rate=args.attn_drop_rate,
head_drop_rate=args.head_drop_rate,
drop_block_rate=None,
use_mean_pooling=args.use_mean_pooling,
init_scale=args.init_scale,
with_cp=args.with_checkpoint,
)
patch_size = model.patch_embed.patch_size
print("Patch size = %s" % str(patch_size))
args.window_size = (args.num_frames // args.tubelet_size,
args.input_size // patch_size[0],
args.input_size // patch_size[1])
args.patch_size = patch_size
if args.finetune:
if args.finetune.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.finetune, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.finetune, map_location='cpu')
print("Load ckpt from %s" % args.finetune)
checkpoint_model = None
for model_key in args.model_key.split('|'):
if model_key in checkpoint:
checkpoint_model = checkpoint[model_key]
print("Load state_dict by model_key = %s" % model_key)
break
if checkpoint_model is None:
checkpoint_model = checkpoint
for old_key in list(checkpoint_model.keys()):
if old_key.startswith('_orig_mod.'):
new_key = old_key[10:]
checkpoint_model[new_key] = checkpoint_model.pop(old_key)
state_dict = model.state_dict()
for k in ['head.weight', 'head.bias']:
if k in checkpoint_model and checkpoint_model[
k].shape != state_dict[k].shape:
if checkpoint_model[k].shape[
0] == 710 and args.data_set.startswith('Kinetics'):
print(f'Convert K710 head to {args.data_set} head')
if args.data_set == 'Kinetics-400':
label_map_path = 'misc/label_710to400.json'
elif args.data_set == 'Kinetics-600':
label_map_path = 'misc/label_710to600.json'
elif args.data_set == 'Kinetics-700':
label_map_path = 'misc/label_710to700.json'
label_map = json.load(open(label_map_path))
checkpoint_model[k] = checkpoint_model[k][label_map]
else:
print(f"Removing key {k} from pretrained checkpoint")
del checkpoint_model[k]
all_keys = list(checkpoint_model.keys())
new_dict = OrderedDict()
for key in all_keys:
if key.startswith('backbone.'):
new_dict[key[9:]] = checkpoint_model[key]
elif key.startswith('encoder.'):
new_dict[key[8:]] = checkpoint_model[key]
else:
new_dict[key] = checkpoint_model[key]
checkpoint_model = new_dict
# interpolate position embedding
if 'pos_embed' in checkpoint_model:
pos_embed_checkpoint = checkpoint_model['pos_embed']
embedding_size = pos_embed_checkpoint.shape[-1] # channel dim
num_patches = model.patch_embed.num_patches #
num_extra_tokens = model.pos_embed.shape[-2] - num_patches # 0/1
# height (== width) for the checkpoint position embedding
orig_size = int(
((pos_embed_checkpoint.shape[-2] - num_extra_tokens) //
(args.num_frames // model.patch_embed.tubelet_size))**0.5)
# height (== width) for the new position embedding
new_size = int(
(num_patches //
(args.num_frames // model.patch_embed.tubelet_size))**0.5)
# class_token and dist_token are kept unchanged
if orig_size != new_size:
print("Position interpolate from %dx%d to %dx%d" %
(orig_size, orig_size, new_size, new_size))
extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
# only the position tokens are interpolated
pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
# B, L, C -> BT, H, W, C -> BT, C, H, W
pos_tokens = pos_tokens.reshape(
-1, args.num_frames // model.patch_embed.tubelet_size,
orig_size, orig_size, embedding_size)
pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size,
embedding_size).permute(
0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens,
size=(new_size, new_size),
mode='bicubic',
align_corners=False)
# BT, C, H, W -> BT, H, W, C -> B, T, H, W, C
pos_tokens = pos_tokens.permute(0, 2, 3, 1).reshape(
-1, args.num_frames // model.patch_embed.tubelet_size,
new_size, new_size, embedding_size)
pos_tokens = pos_tokens.flatten(1, 3) # B, L, C
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
checkpoint_model['pos_embed'] = new_pos_embed
elif args.input_size != 224:
pos_tokens = model.pos_embed
org_num_frames = 16
T = org_num_frames // args.tubelet_size
P = int((pos_tokens.shape[1] // T)**0.5)
C = pos_tokens.shape[2]
new_P = args.input_size // patch_size[0]
# B, L, C -> BT, H, W, C -> BT, C, H, W
pos_tokens = pos_tokens.reshape(-1, T, P, P, C)
pos_tokens = pos_tokens.reshape(-1, P, P, C).permute(0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens,
size=(new_P, new_P),
mode='bicubic',
align_corners=False)
# BT, C, H, W -> BT, H, W, C -> B, T, H, W, C
pos_tokens = pos_tokens.permute(0, 2, 3,
1).reshape(-1, T, new_P, new_P, C)
pos_tokens = pos_tokens.flatten(1, 3) # B, L, C
model.pos_embed = pos_tokens # update
if args.num_frames != 16:
org_num_frames = 16
T = org_num_frames // args.tubelet_size
pos_tokens = model.pos_embed
new_T = args.num_frames // args.tubelet_size
P = int((pos_tokens.shape[1] // T)**0.5)
C = pos_tokens.shape[2]
pos_tokens = pos_tokens.reshape(-1, T, P, P, C)
pos_tokens = pos_tokens.permute(0, 2, 3, 4,
1).reshape(-1, C, T) # BHW,C,T
pos_tokens = torch.nn.functional.interpolate(
pos_tokens, size=new_T, mode='linear')
pos_tokens = pos_tokens.reshape(1, P, P, C,
new_T).permute(0, 4, 1, 2, 3)
pos_tokens = pos_tokens.flatten(1, 3)
model.pos_embed = pos_tokens # update
utils.load_state_dict(
model, checkpoint_model, prefix=args.model_prefix)
default_kernel_sizes = [[3, 3, 3], [13, 13, 13], [13, 3, 3, 13, 3, 3, 13, 3, 3, 13, 3, 3, 13, 3, 3, 13, 3, 3, 13, 3, 3, 13, 3, 3, 13, 3, 3], [11, 11, 11]]
model = UniRepLKNet(num_classes=400, depths=[3, 3, 27, 3], dims=[128,256,512,1024], drop_path_rate=0.4,
kernel_sizes=default_kernel_sizes,
custom_set='nolk', disable_iGEMM=True)
model.to(device)
model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
model_ema = ModelEma(
model,
decay=args.model_ema_decay,
device='cpu' if args.model_ema_force_cpu else '',
resume='')
print("Using EMA with decay = %.8f" % args.model_ema_decay)
model_without_ddp = model
n_parameters = sum(p.numel() for p in model.parameters()
if p.requires_grad)
print("Model = %s" % str(model_without_ddp))
print('number of params:', n_parameters)
total_batch_size = args.batch_size * args.update_freq * num_tasks
num_training_steps_per_epoch = len(dataset_train) // total_batch_size
args.lr = args.lr * total_batch_size / 256
#########scale the lr#############
args.min_lr = args.min_lr * total_batch_size / 256
args.warmup_lr = args.warmup_lr * total_batch_size / 256
#########scale the lr#############
print("LR = %.8f" % args.lr)
print("Batch size = %d" % total_batch_size)
print("Update frequent = %d" % args.update_freq)
print("Number of training examples = %d" % len(dataset_train))
print("Number of training training per epoch = %d" %
num_training_steps_per_epoch)
# num_layers = model_without_ddp.get_num_layers()
# num_layers = 13
# if args.layer_decay < 1.0:
# assigner = LayerDecayValueAssigner(
# list(args.layer_decay**(num_layers + 1 - i)
# for i in range(num_layers + 2)))
# else:
# assigner = None
# if assigner is not None:
# print("Assigned values = %s" % str(assigner.values))
# if args.layer_decay < 1.0 or args.layer_decay > 1.0:
# num_layers = 12
# # set lower learning rate for lower-level layers.
# # follow the implementation in the code of ConvNeXt and BeiT
# assigner = RepLKNetLayerDecayValueAssigner(list(args.layer_decay ** (num_layers + 1 - i) for i in range(num_layers + 2)))
# else:
assigner = None
if assigner is None and (args.layer_decay < 1.0 or args.layer_decay > 1.0):
num_layers = 12 # convnext layers divided into 12 parts, each with a different decayed lr value.
assigner = LayerDecayValueAssigner(list(args.layer_decay ** (num_layers + 1 - i) for i in range(num_layers + 2)))
else:
assigner = None
if assigner is not None:
print("Assigned values = %s" % str(assigner.values))
| optimizer = create_optimizer( | 1 | 2023-11-24 07:28:22+00:00 | 12k |
wenquanlu/HandRefiner | ldm/models/diffusion/ddpm.py | [
{
"identifier": "log_txt_as_img",
"path": "ldm/util.py",
"snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n ... | import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import itertools
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from omegaconf import ListConfig
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler | 10,230 | https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
https://github.com/CompVis/taming-transformers
-- merci
"""
# Maps a conditioning mode name to the keyword argument the diffusion model
# is invoked with. NOTE(review): the exact consumption site (presumably
# DiffusionWrapper) is not visible in this chunk — confirm there.
__conditioning_keys__ = {'concat': 'c_concat',
                         'crossattn': 'c_crossattn',
                         'adm': 'y'}
def disabled_train(self, mode=True):
    """Drop-in replacement for ``Module.train`` that does nothing: the
    requested ``mode`` is ignored and the module is returned unchanged,
    so its train/eval state can never be flipped again."""
    return self
def uniform_on_device(r1, r2, shape, device):
    """Draw a tensor of the given ``shape`` on ``device``, sampled uniformly
    from the half-open interval ``[r2, r1)`` (assuming ``r1 > r2``)."""
    span = r1 - r2
    return span * torch.rand(*shape, device=device) + r2
class DDPM(pl.LightningModule):
# classic DDPM with Gaussian diffusion, in image space
def __init__(self,
unet_config,
timesteps=1000,
beta_schedule="linear",
loss_type="l2",
ckpt_path=None,
ignore_keys=[],
load_only_unet=False,
monitor="val/loss",
use_ema=True,
first_stage_key="image",
image_size=256,
channels=3,
log_every_t=100,
clip_denoised=True,
linear_start=1e-4,
linear_end=2e-2,
cosine_s=8e-3,
given_betas=None,
original_elbo_weight=0.,
v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
l_simple_weight=1.,
conditioning_key=None,
parameterization="eps", # all assuming fixed variance schedules
scheduler_config=None,
use_positional_encodings=False,
learn_logvar=False,
logvar_init=0.,
make_it_fit=False,
ucg_training=None,
reset_ema=False,
reset_num_ema_updates=False,
):
super().__init__()
assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"'
self.parameterization = parameterization
print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
self.cond_stage_model = None
self.clip_denoised = clip_denoised
self.log_every_t = log_every_t
self.first_stage_key = first_stage_key
self.image_size = image_size # try conv?
self.channels = channels
self.use_positional_encodings = use_positional_encodings
self.model = DiffusionWrapper(unet_config, conditioning_key)
count_params(self.model, verbose=True)
self.use_ema = use_ema
if self.use_ema:
self.model_ema = LitEma(self.model)
print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
self.use_scheduler = scheduler_config is not None
if self.use_scheduler:
self.scheduler_config = scheduler_config
self.v_posterior = v_posterior
self.original_elbo_weight = original_elbo_weight
self.l_simple_weight = l_simple_weight
if monitor is not None:
self.monitor = monitor
self.make_it_fit = make_it_fit
if reset_ema: assert exists(ckpt_path)
if ckpt_path is not None:
self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet)
if reset_ema:
assert self.use_ema
print(f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.")
self.model_ema = LitEma(self.model)
if reset_num_ema_updates:
print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ")
assert self.use_ema
self.model_ema.reset_num_updates()
self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps,
linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
self.loss_type = loss_type
self.learn_logvar = learn_logvar
logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))
if self.learn_logvar:
self.logvar = nn.Parameter(self.logvar, requires_grad=True)
else:
self.register_buffer('logvar', logvar)
self.ucg_training = ucg_training or dict()
if self.ucg_training:
self.ucg_prng = np.random.RandomState()
def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
if exists(given_betas):
betas = given_betas
else:
| """
wild mixture of
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
https://github.com/CompVis/taming-transformers
-- merci
"""
# Maps a conditioning mode name to the keyword argument the diffusion model
# is invoked with. NOTE(review): the exact consumption site (presumably
# DiffusionWrapper) is not visible in this chunk — confirm there.
__conditioning_keys__ = {'concat': 'c_concat',
                         'crossattn': 'c_crossattn',
                         'adm': 'y'}
def disabled_train(self, mode=True):
    """Drop-in replacement for ``Module.train`` that does nothing: the
    requested ``mode`` is ignored and the module is returned unchanged,
    so its train/eval state can never be flipped again."""
    return self
def uniform_on_device(r1, r2, shape, device):
    """Draw a tensor of the given ``shape`` on ``device``, sampled uniformly
    from the half-open interval ``[r2, r1)`` (assuming ``r1 > r2``)."""
    span = r1 - r2
    return span * torch.rand(*shape, device=device) + r2
class DDPM(pl.LightningModule):
# classic DDPM with Gaussian diffusion, in image space
def __init__(self,
unet_config,
timesteps=1000,
beta_schedule="linear",
loss_type="l2",
ckpt_path=None,
ignore_keys=[],
load_only_unet=False,
monitor="val/loss",
use_ema=True,
first_stage_key="image",
image_size=256,
channels=3,
log_every_t=100,
clip_denoised=True,
linear_start=1e-4,
linear_end=2e-2,
cosine_s=8e-3,
given_betas=None,
original_elbo_weight=0.,
v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
l_simple_weight=1.,
conditioning_key=None,
parameterization="eps", # all assuming fixed variance schedules
scheduler_config=None,
use_positional_encodings=False,
learn_logvar=False,
logvar_init=0.,
make_it_fit=False,
ucg_training=None,
reset_ema=False,
reset_num_ema_updates=False,
):
super().__init__()
assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"'
self.parameterization = parameterization
print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
self.cond_stage_model = None
self.clip_denoised = clip_denoised
self.log_every_t = log_every_t
self.first_stage_key = first_stage_key
self.image_size = image_size # try conv?
self.channels = channels
self.use_positional_encodings = use_positional_encodings
self.model = DiffusionWrapper(unet_config, conditioning_key)
count_params(self.model, verbose=True)
self.use_ema = use_ema
if self.use_ema:
self.model_ema = LitEma(self.model)
print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
self.use_scheduler = scheduler_config is not None
if self.use_scheduler:
self.scheduler_config = scheduler_config
self.v_posterior = v_posterior
self.original_elbo_weight = original_elbo_weight
self.l_simple_weight = l_simple_weight
if monitor is not None:
self.monitor = monitor
self.make_it_fit = make_it_fit
if reset_ema: assert exists(ckpt_path)
if ckpt_path is not None:
self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet)
if reset_ema:
assert self.use_ema
print(f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.")
self.model_ema = LitEma(self.model)
if reset_num_ema_updates:
print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ")
assert self.use_ema
self.model_ema.reset_num_updates()
self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps,
linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
self.loss_type = loss_type
self.learn_logvar = learn_logvar
logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))
if self.learn_logvar:
self.logvar = nn.Parameter(self.logvar, requires_grad=True)
else:
self.register_buffer('logvar', logvar)
self.ucg_training = ucg_training or dict()
if self.ucg_training:
self.ucg_prng = np.random.RandomState()
def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
if exists(given_betas):
betas = given_betas
else: | betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, | 13 | 2023-11-24 10:19:23+00:00 | 12k |
VITA-Group/LightGaussian | gaussian_renderer/gaussian_count.py | [
{
"identifier": "GaussianModel",
"path": "scene/gaussian_model.py",
"snippet": "class GaussianModel:\n def setup_functions(self):\n def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation):\n L = build_scaling_rotation(scaling_modifier * scaling, rotation)\n ... | import torch
import math
from diff_gaussian_rasterization import (
GaussianRasterizationSettings,
GaussianRasterizer,
)
from scene.gaussian_model import GaussianModel
from utils.sh_utils import eval_sh | 8,771 | # base on __ini__.render
#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact george.drettakis@inria.fr
#
def count_render(
viewpoint_camera,
pc: GaussianModel,
pipe,
bg_color: torch.Tensor,
scaling_modifier=1.0,
override_color=None,
):
"""
Render the scene.
Background tensor (bg_color) must be on GPU!
"""
# Create zero tensor. We will use it to make pytorch return gradients of the 2D (screen-space) means
screenspace_points = (
torch.zeros_like(
pc.get_xyz, dtype=pc.get_xyz.dtype, requires_grad=True, device="cuda"
)
+ 0
)
try:
screenspace_points.retain_grad()
except:
pass
# Set up rasterization configuration
tanfovx = math.tan(viewpoint_camera.FoVx * 0.5)
tanfovy = math.tan(viewpoint_camera.FoVy * 0.5)
raster_settings = GaussianRasterizationSettings(
image_height=int(viewpoint_camera.image_height),
image_width=int(viewpoint_camera.image_width),
tanfovx=tanfovx,
tanfovy=tanfovy,
bg=bg_color,
scale_modifier=scaling_modifier,
viewmatrix=viewpoint_camera.world_view_transform,
projmatrix=viewpoint_camera.full_proj_transform,
sh_degree=pc.active_sh_degree,
campos=viewpoint_camera.camera_center,
prefiltered=False,
debug=pipe.debug,
)
rasterizer = GaussianRasterizer(raster_settings=raster_settings, f_count=True)
means3D = pc.get_xyz
means2D = screenspace_points
opacity = pc.get_opacity
# If precomputed 3d covariance is provided, use it. If not, then it will be computed from
# scaling / rotation by the rasterizer.
scales = None
rotations = None
cov3D_precomp = None
if pipe.compute_cov3D_python:
cov3D_precomp = pc.get_covariance(scaling_modifier)
else:
scales = pc.get_scaling
rotations = pc.get_rotation
# If precomputed colors are provided, use them. Otherwise, if it is desired to precompute colors
# from SHs in Python, do it. If not, then SH -> RGB conversion will be done by rasterizer.
shs = None
colors_precomp = None
if override_color is None:
if pipe.convert_SHs_python:
shs_view = pc.get_features.transpose(1, 2).view(
-1, 3, (pc.max_sh_degree + 1) ** 2
)
dir_pp = pc.get_xyz - viewpoint_camera.camera_center.repeat(
pc.get_features.shape[0], 1
)
dir_pp_normalized = dir_pp / dir_pp.norm(dim=1, keepdim=True)
| # base on __ini__.render
#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact george.drettakis@inria.fr
#
def count_render(
viewpoint_camera,
pc: GaussianModel,
pipe,
bg_color: torch.Tensor,
scaling_modifier=1.0,
override_color=None,
):
"""
Render the scene.
Background tensor (bg_color) must be on GPU!
"""
# Create zero tensor. We will use it to make pytorch return gradients of the 2D (screen-space) means
screenspace_points = (
torch.zeros_like(
pc.get_xyz, dtype=pc.get_xyz.dtype, requires_grad=True, device="cuda"
)
+ 0
)
try:
screenspace_points.retain_grad()
except:
pass
# Set up rasterization configuration
tanfovx = math.tan(viewpoint_camera.FoVx * 0.5)
tanfovy = math.tan(viewpoint_camera.FoVy * 0.5)
raster_settings = GaussianRasterizationSettings(
image_height=int(viewpoint_camera.image_height),
image_width=int(viewpoint_camera.image_width),
tanfovx=tanfovx,
tanfovy=tanfovy,
bg=bg_color,
scale_modifier=scaling_modifier,
viewmatrix=viewpoint_camera.world_view_transform,
projmatrix=viewpoint_camera.full_proj_transform,
sh_degree=pc.active_sh_degree,
campos=viewpoint_camera.camera_center,
prefiltered=False,
debug=pipe.debug,
)
rasterizer = GaussianRasterizer(raster_settings=raster_settings, f_count=True)
means3D = pc.get_xyz
means2D = screenspace_points
opacity = pc.get_opacity
# If precomputed 3d covariance is provided, use it. If not, then it will be computed from
# scaling / rotation by the rasterizer.
scales = None
rotations = None
cov3D_precomp = None
if pipe.compute_cov3D_python:
cov3D_precomp = pc.get_covariance(scaling_modifier)
else:
scales = pc.get_scaling
rotations = pc.get_rotation
# If precomputed colors are provided, use them. Otherwise, if it is desired to precompute colors
# from SHs in Python, do it. If not, then SH -> RGB conversion will be done by rasterizer.
shs = None
colors_precomp = None
if override_color is None:
if pipe.convert_SHs_python:
shs_view = pc.get_features.transpose(1, 2).view(
-1, 3, (pc.max_sh_degree + 1) ** 2
)
dir_pp = pc.get_xyz - viewpoint_camera.camera_center.repeat(
pc.get_features.shape[0], 1
)
dir_pp_normalized = dir_pp / dir_pp.norm(dim=1, keepdim=True) | sh2rgb = eval_sh(pc.active_sh_degree, shs_view, dir_pp_normalized) | 1 | 2023-11-26 20:02:23+00:00 | 12k |
eth-sri/language-model-arithmetic | src/model_arithmetic/model_arithmetic.py | [
{
"identifier": "load_model",
"path": "src/model_arithmetic/basic_model_loader.py",
"snippet": "def load_model(dir_or_model, classification=False, token_classification=False, return_tokenizer=False, dtype=torch.bfloat16, load_dtype=True, \n rl=False, peft_config=None):\n \"\"\"\n Th... | from transformers import PreTrainedModel
from .basic_model_loader import load_model, load_tokenizer
from .utils import get_max_length, ENABLE_LOGGING, log
from collections import namedtuple
from transformers import top_k_top_p_filtering
from loguru import logger
from .operators import Operator
from .monitor import Monitor
from .runnable_operators import RunnableOperator, PromptedLLM
from .input import TokenizedInput
from .lm_eval_compatibility import Compatibility
import json
import numpy as np
import torch
import os
import time
import random | 10,659 |
class ModelArithmetic(PreTrainedModel):
"""
Main class for prompt arithmetic. Handles the generation of text based on the formula.
"""
SAVE_FILE = "prompt_arithmetic.json"
_supports_sdpa = True
|
class ModelArithmetic(PreTrainedModel):
"""
Main class for prompt arithmetic. Handles the generation of text based on the formula.
"""
SAVE_FILE = "prompt_arithmetic.json"
_supports_sdpa = True
| def __init__(self, formula : Operator, default_model : str = None, dtype=torch.bfloat16, intermediate_argmax : bool = False, epsilon = 1e-12, | 5 | 2023-11-21 20:01:08+00:00 | 12k |
huang-yh/SelfOcc | model/head/nerfacc_head/nerfacc_head.py | [
{
"identifier": "BaseTaskHead",
"path": "model/head/base_head.py",
"snippet": "class BaseTaskHead(BaseModule):\n \"\"\"Segmentation heads.\n image backbone -> neck -> lifter -> encoder -> segmentor\n Predicts semantic labels for voxels (and points for lidar segmentation).\n \"\"\"\n\n def... | import os
import nerfacc, torch, collections, math
from ..base_head import BaseTaskHead
from .ray_sampler import RaySampler
from .img2lidar import Img2LiDAR
from .bev_nerf import BEVNeRF
from .rendering import custom_rendering
from .estimator import CustomOccGridEstimator
from mmseg.models import HEADS
from mmengine.logging import MMLogger | 7,637 | logger = MMLogger.get_instance('selfocc')
def namedtuple_map(fn, tup):
    """Apply ``fn`` to every non-None field of ``tup`` and rebuild the result
    as the same namedtuple type (None fields pass through unchanged)."""
    mapped = (None if field is None else fn(field) for field in tup)
    return type(tup)(*mapped)
# Ray bundle container: per-ray origin points and (unit) view directions.
Rays = collections.namedtuple("Rays", ("origins", "viewdirs"))
# Occupancy threshold constant. NOTE(review): its consumption site is not
# visible in this chunk — presumably compared against predicted densities.
OCC_THRE = 0.0
@HEADS.register_module()
# Module-level MMEngine logger shared under the 'selfocc' instance name.
logger = MMLogger.get_instance('selfocc')
def namedtuple_map(fn, tup):
    """Apply ``fn`` to every non-None field of ``tup`` and rebuild the result
    as the same namedtuple type (None fields pass through unchanged)."""
    mapped = (None if field is None else fn(field) for field in tup)
    return type(tup)(*mapped)
# Ray bundle container: per-ray origin points and (unit) view directions.
Rays = collections.namedtuple("Rays", ("origins", "viewdirs"))
# Occupancy threshold constant. NOTE(review): its consumption site is not
# visible in this chunk — presumably compared against predicted densities.
OCC_THRE = 0.0
@HEADS.register_module() | class NeRFAccHead(BaseTaskHead): | 0 | 2023-11-20 12:49:14+00:00 | 12k |
togethercomputer/stripedhyena | src/model.py | [
{
"identifier": "InferenceParams",
"path": "src/cache.py",
"snippet": "class InferenceParams:\n \"\"\"Inference parameters that are passed to the main model in order\n to efficienly calculate and store the context during inference.\"\"\"\n\n max_seqlen: int\n max_batch_size: int\n seqlen_... | import torch
import torch.nn as nn
import torch.nn.functional as F
from src.cache import InferenceParams, RecurrentInferenceParams
from src.engine import HyenaInferenceEngine
from src.layers import ParallelGatedMLP, RMSNorm, VocabParallelEmbedding
from src.utils import column_split, print_rank_0
from flash_attn.modules.mha import MHA
from flashfftconv import FlashFFTConv | 7,939 | h = (residues * (log_poles * self.t).exp()).real.sum(1)[None]
return h, filter_dtype, log_poles, residues
class ParallelGatedConvBlock(nn.Module):
    """Gated Hyena convolution block: pre-norm -> fused input projection ->
    long-convolution filter -> output dense with residual -> post-norm gated
    MLP with residual.

    ``proj_norm``/``res_mlp_norm`` may be wrapped by ``torch.compile`` at
    construction time, so ``forward`` always calls them through the ``*_fn``
    indirections rather than directly.
    """
    def __init__(self, config, layer_idx) -> None:
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        # NOTE(review): low_mem_mode is stored but never read in this block's
        # visible code — presumably consumed elsewhere; confirm.
        self.low_mem_mode = config.get("low_mem_mode", False)
        # Block compute dtype (norms/filter/dense) vs. a separate MLP dtype.
        dtype = config.get("hyena_block_dtype", torch.float32)
        mlp_dtype = config.get("mlp_dtype", torch.bfloat16)
        self.pre_norm, self.post_norm = RMSNorm(config).to(dtype=dtype), RMSNorm(config).to(dtype=dtype)
        self.filter = ParallelHyenaFilter(config, layer_idx).to(dtype=dtype)
        # Projects hidden_size -> 3 * hidden_size; the widened output is consumed by the filter.
        self.projections = nn.Linear(config.hidden_size, 3 * config.hidden_size)
        self.out_filter_dense = nn.Linear(config.hidden_size, config.hidden_size).to(dtype)
        self.mlp = ParallelGatedMLP(config).to(dtype=mlp_dtype)
        # Default: call the plain Python methods; optionally swapped for compiled versions below.
        self.proj_norm_fn = self.proj_norm
        self.res_mlp_norm_fn = self.res_mlp_norm
        if self.config.get("compile", False):
            self.proj_norm_fn = torch.compile(self.proj_norm, fullgraph=True, dynamic=False, mode="reduce-overhead")
            self.res_mlp_norm_fn = torch.compile(
                self.res_mlp_norm, fullgraph=True, dynamic=False, mode="reduce-overhead"
            )
    def proj_norm(self, x):
        # Pre-norm followed by the fused input projection.
        return self.projections(self.pre_norm(x))
    def res_mlp_norm(self, x):
        # Post-norm -> gated MLP, with a residual connection around both.
        return self.mlp(self.post_norm(x)) + x
    def forward(self, u, inference_params=None, padding_mask=None, *args, **kwargs):
        # u: input hidden states; inference_params: optional cached filter state.
        z = self.proj_norm_fn(u)
        if type(padding_mask) == torch.Tensor:  # guard against bias
            # Zero out padded positions so they do not leak into the convolution.
            z = z * padding_mask[..., None]
        # Long-convolution filter; may consume/update cached inference state.
        z, inference_params = self.filter(z, inference_params=inference_params, padding_mask=padding_mask)
        z_in = self.out_filter_dense(z) + u
        if type(padding_mask) == torch.Tensor:  # guard against bias
            z_in = z_in * padding_mask[..., None]
        y = self.res_mlp_norm_fn(z_in)
        return y, inference_params
def get_block(config, layer_idx, flash_fft=None):
    """Instantiate the layer at ``layer_idx``.

    Returns an AttentionBlock for indices listed in ``config.attn_layer_idxs``
    and a ParallelGatedConvBlock for ``config.hyena_layer_idxs``; ``flash_fft``
    is an optional pre-built FlashFFT conv kernel shared across hyena blocks.
    Raises NotImplementedError for an index in neither list.
    """
    if layer_idx in config.attn_layer_idxs:
        return AttentionBlock(config, layer_idx)
    elif layer_idx in config.hyena_layer_idxs:
        block = ParallelGatedConvBlock(config, layer_idx)
        # Fixed: the default used to be the *string* "False" (always truthy),
        # which attached `flash_fft` (possibly None) even when the flag was unset.
        if config.get("use_flashfft", False):
            block.filter.fftconv_fn = flash_fft
        return block
    else:
        raise NotImplementedError(f"layer_idx {layer_idx} is neither an attention nor a hyena layer")
class StripedHyena(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.embedding_layer = VocabParallelEmbedding(config)
self.norm = RMSNorm(config) if config.get("final_norm", True) else None
self.unembed = self.emb if config.tie_embeddings else VocabParallelEmbedding(config)
if config.get("use_flashfft", "False"):
self.flash_fft = FlashFFTConv(2 * config.seqlen, dtype=torch.bfloat16)
else:
self.flash_fft = None
self.blocks = nn.ModuleList(
get_block(config, layer_idx, flash_fft=self.flash_fft) for layer_idx in range(config.num_layers)
)
def forward(self, x, inference_params_dict=None, padding_mask=None):
L = x.shape[1]
x = self.embedding_layer.embed(x)
if inference_params_dict is not None:
x, inference_params_dict_out = self.stateful_forward(
x,
inference_params_dict=inference_params_dict,
)
else:
x, inference_params_dict_out = self.stateless_forward(x, padding_mask=padding_mask)
x = self.norm(x)
x = self.unembed.unembed(x)
return x, inference_params_dict_out
def stateful_forward(self, x, inference_params_dict=None):
for block_idx, block in enumerate(self.blocks):
block_name = "mha" if block_idx in self.config.attn_layer_idxs else "hyena"
inference_params = inference_params_dict[block_name]
x, _ = block(x, inference_params=inference_params)
return x, inference_params_dict
def stateless_forward(self, x, padding_mask=None):
if type(padding_mask) == torch.Tensor:
x = x * padding_mask[..., None]
for _, block in enumerate(self.blocks):
x, _ = block(x, inference_params=None, padding_mask=padding_mask)
return x, None
def initialize_inference_params(self):
print_rank_0("Initializing inference params...")
inference_params_dict = {
"mha": InferenceParams(
max_seqlen=self.config.get("max_seqlen", 8192),
max_batch_size=self.config.get("max_batch_size", 1),
seqlen_offset=0,
),
| # Copyright (c) Together
# This software is distributed under the terms of the Apache License, Version 2.0
# Author: Michael Poli
# Note: MP and PP utilities are removed for ease of use and editing.
try:
except ImportError:
"flash_attn not installed"
class AttentionBlock(nn.Module):
def __init__(self, config, layer_idx) -> None:
super().__init__()
self.config = config
self.pre_norm, self.post_norm = RMSNorm(config), RMSNorm(config)
self.layer_idx = layer_idx
self.proj_groups = config.get("proj_groups", 1)
dtype = config.get("attn_block_dtype", torch.bfloat16)
mlp_dtype = config.get("mlp_dtype", torch.bfloat16)
self.num_attention_heads = config.num_attention_heads
self.hidden_size_per_attention_head = config.hidden_size // config.num_attention_heads
self.counter = 0
self.inner_mha_cls = MHA(
embed_dim=config.hidden_size,
num_heads=config.num_attention_heads,
num_heads_kv=config.num_attention_heads // self.proj_groups,
rotary_emb_dim=config.hidden_size // config.num_attention_heads,
qkv_proj_bias=config.get("qkv_proj_bias", True),
rotary_emb_base=config.get("rotary_emb_base", 10000),
causal=True,
layer_idx=layer_idx,
out_proj_bias=config.get("mha_out_proj_bias", True),
use_flash_attn=self.config.use_flash_attn,
).to(dtype=dtype)
if self.config.get("smeared_gqa", False):
self.inner_mha_cls.num_heads_kv = self.inner_mha_cls.num_heads
self.inner_mha_cls.rotary_emb.register_buffer("inv_freq", self.inner_mha_cls.rotary_emb.inv_freq)
self.mlp = ParallelGatedMLP(config).to(dtype=mlp_dtype)
def forward(self, u, inference_params=None, padding_mask=None, *args, **kwargs):
if (
type(padding_mask) == torch.Tensor
): # workaround for masking bug in FA. This works because Wqkv does not have bias
# and attention scores will be also automatically zeroed.
u = u * padding_mask[..., None]
u = (
self.inner_mha_cls(
self.pre_norm(u),
inference_params=inference_params,
)
+ u
)
if type(padding_mask) == torch.Tensor: # guard against bias
u = u * padding_mask[..., None]
u = self.mlp(self.post_norm(u)) + u
return u, None
class ParallelHyenaFilter(nn.Module):
def __init__(self, config, layer_idx) -> None:
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.hyena_filter_groups = config.get("hyena_filter_groups", self.config.hidden_size)
self.use_flashfft = config.get("use_flashfft", False)
self.state_size = config.state_size
self.hidden_size = config.hidden_size
self.num_filters = config.num_filters
self.inference_mode = config.get("inference_mode", True)
self.counter = 0
self.column_split_hyena = config.get("column_split_hyena", True)
assert self.hidden_size % self.num_filters == 0 and self.num_filters <= self.hidden_size
self.D = nn.Parameter(torch.zeros(self.hidden_size))
# attention heads are not used except to split post short_filter
# projections in the same way as the checkpoint
self.num_attention_heads = config.num_attention_heads
self.hidden_size_per_attention_head = self.hidden_size // self.num_attention_heads
# after preprocessing here we can save the new checkpoint
self.short_filter_length = config.short_filter_length
self.short_filter_weight = nn.Parameter(torch.randn(3 * config.hidden_size, 1, config.short_filter_length))
self.short_filter_bias = (
nn.Parameter(torch.randn(3 * config.hidden_size)) if config.short_filter_bias else None
)
self.engine = HyenaInferenceEngine(layer_idx=layer_idx)
self.use_flash_depthwise = config.get("use_flash_depthwise", False)
self.data_dtype = None
if self.use_flash_depthwise:
self.fir_fn = FlashDepthwiseConv1d(
channels=3 * self.hidden_size,
kernel_size=self.short_filter_length,
padding=self.short_filter_length - 1,
weights=self.short_filter_weight,
bias=self.short_filter_bias,
device=None,
dtype=self.config.get("depthwise_dtype", torch.bfloat16),
)
else:
self.fir_fn = F.conv1d
self.fftconv_fn = None
self.long_fir_threshold = config.get("long_fir_threshold", None)
if self.long_fir_threshold is not None:
assert self.use_flashfft is False, "long_fir_threshold not compatible with fused flashfft"
self.num_systems = self.hidden_size // self.hyena_filter_groups
poles = torch.randn(self.num_systems, self.state_size, 1, 2)
# TODO: bring over init from internals
poles[..., 0] = 1e-2 * torch.randn(self.num_systems, self.state_size, 1)
poles[..., 1] = 1e-3 * torch.randn(self.num_systems, self.state_size, 1)
self.poles = nn.Parameter(poles)
self.residues = nn.Parameter(torch.randn(self.num_systems, self.state_size, 1, 2))
self.h = None
def forward(self, u, inference_params=None, padding_mask=None, *args, **kwargs):
if inference_params is not None and self.layer_idx in inference_params.fir_state_dict.keys():
return self.sequential_forward(u, inference_params)
else:
return self.parallel_forward(u, inference_params, padding_mask)
def parallel_forward(self, u, inference_params=None, padding_mask=None):
L = u.shape[1]
z_pre, fir_state = self.engine.parallel_fir(
self.fir_fn,
u,
self.short_filter_weight,
self.short_filter_bias,
L,
fir_length=self.short_filter_length,
inference_params=inference_params,
padding_mask=padding_mask,
)
if inference_params:
inference_params.fir_state_dict[self.layer_idx] = fir_state
if self.h is None:
h, filter_dtype, poles, residues = self.compute_filter(L, u.device)
else:
h = self.h
filter_dtype = self.h.dtype
if self.hyena_filter_groups > 1:
h = h.repeat_interleave(self.hidden_size // self.hyena_filter_groups, 1)
# if inference_params is not None, we plan to perform generation:
# prefilling is handled by the engine.
dims = (
self.hidden_size,
self.num_attention_heads,
self.hidden_size_per_attention_head,
self.state_size,
self.hyena_filter_groups,
)
y = self.engine.parallel_iir(
z_pre,
h,
self.D,
L,
t=self.t,
poles=self.poles,
residues=self.residues,
dims=dims,
inference_params=inference_params,
layer_idx=self.layer_idx,
prefill_style=self.config.get("prefill_style", "fft"),
use_flashfft=self.use_flashfft,
fftconv_fn=self.fftconv_fn,
column_split_hyena=self.column_split_hyena,
long_fir_threshold=self.long_fir_threshold,
padding_mask=padding_mask,
)
return y, inference_params
def sequential_forward(self, u, inference_params):
if self.data_dtype is None:
self.data_dtype = u.dtype
if len(u.shape) > 2:
u = u[:, -1]
fir_state, iir_state = (
inference_params.fir_state_dict[self.layer_idx],
inference_params.state_dict[self.layer_idx],
)
z_pre, fir_state = self.engine.step_fir(
u, fir_state, weight=self.short_filter_weight, bias=self.short_filter_bias
)
x2, x1, v = (
column_split(z_pre, self.num_attention_heads, self.hidden_size_per_attention_head)
if self.column_split_hyena
else z_pre.split([self.hidden_size, self.hidden_size, self.hidden_size], dim=1)
)
y, iir_state = self.engine.step_iir(
x2,
x1,
v,
self.D,
self.residues,
self.poles,
iir_state,
iir_groups=self.hyena_filter_groups,
)
inference_params.fir_state_dict[self.layer_idx] = fir_state
inference_params.state_dict[self.layer_idx] = iir_state
y = y.to(dtype=self.data_dtype)
return y[:, None], inference_params
def update_time(self, L, device):
"""
Set [0, 1, ..., L-1] where L is the length of the current batch of inputs.
If L is greater than the length of the previous batch, then the time vector is
reinitialized. Otherwise, the time vector is truncated from cache.
"""
if not hasattr(self, "t"):
self.t = torch.arange(L, device=device)[None, None]
elif self.t.shape[-1] < L:
self.t = torch.arange(L, device=device)[None, None]
else:
self.t = self.t[..., :L]
def compute_filter(self, L, device):
self.update_time(L, device)
filter_dtype = torch.float32
residues, log_poles = (
torch.view_as_complex(self.residues.to(filter_dtype)),
torch.view_as_complex(self.poles.to(filter_dtype)).log(),
)
h = (residues * (log_poles * self.t).exp()).real.sum(1)[None]
return h, filter_dtype, log_poles, residues
class ParallelGatedConvBlock(nn.Module):
def __init__(self, config, layer_idx) -> None:
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.low_mem_mode = config.get("low_mem_mode", False)
dtype = config.get("hyena_block_dtype", torch.float32)
mlp_dtype = config.get("mlp_dtype", torch.bfloat16)
self.pre_norm, self.post_norm = RMSNorm(config).to(dtype=dtype), RMSNorm(config).to(dtype=dtype)
self.filter = ParallelHyenaFilter(config, layer_idx).to(dtype=dtype)
self.projections = nn.Linear(config.hidden_size, 3 * config.hidden_size)
self.out_filter_dense = nn.Linear(config.hidden_size, config.hidden_size).to(dtype)
self.mlp = ParallelGatedMLP(config).to(dtype=mlp_dtype)
self.proj_norm_fn = self.proj_norm
self.res_mlp_norm_fn = self.res_mlp_norm
if self.config.get("compile", False):
self.proj_norm_fn = torch.compile(self.proj_norm, fullgraph=True, dynamic=False, mode="reduce-overhead")
self.res_mlp_norm_fn = torch.compile(
self.res_mlp_norm, fullgraph=True, dynamic=False, mode="reduce-overhead"
)
def proj_norm(self, x):
return self.projections(self.pre_norm(x))
def res_mlp_norm(self, x):
return self.mlp(self.post_norm(x)) + x
def forward(self, u, inference_params=None, padding_mask=None, *args, **kwargs):
z = self.proj_norm_fn(u)
if type(padding_mask) == torch.Tensor: # guard against bias
z = z * padding_mask[..., None]
z, inference_params = self.filter(z, inference_params=inference_params, padding_mask=padding_mask)
z_in = self.out_filter_dense(z) + u
if type(padding_mask) == torch.Tensor: # guard against bias
z_in = z_in * padding_mask[..., None]
y = self.res_mlp_norm_fn(z_in)
return y, inference_params
def get_block(config, layer_idx, flash_fft=None):
if layer_idx in config.attn_layer_idxs:
return AttentionBlock(config, layer_idx)
elif layer_idx in config.hyena_layer_idxs:
block = ParallelGatedConvBlock(config, layer_idx)
if config.get("use_flashfft", "False"):
block.filter.fftconv_fn = flash_fft
return block
else:
raise NotImplementedError
class StripedHyena(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.embedding_layer = VocabParallelEmbedding(config)
self.norm = RMSNorm(config) if config.get("final_norm", True) else None
self.unembed = self.emb if config.tie_embeddings else VocabParallelEmbedding(config)
if config.get("use_flashfft", "False"):
self.flash_fft = FlashFFTConv(2 * config.seqlen, dtype=torch.bfloat16)
else:
self.flash_fft = None
self.blocks = nn.ModuleList(
get_block(config, layer_idx, flash_fft=self.flash_fft) for layer_idx in range(config.num_layers)
)
def forward(self, x, inference_params_dict=None, padding_mask=None):
L = x.shape[1]
x = self.embedding_layer.embed(x)
if inference_params_dict is not None:
x, inference_params_dict_out = self.stateful_forward(
x,
inference_params_dict=inference_params_dict,
)
else:
x, inference_params_dict_out = self.stateless_forward(x, padding_mask=padding_mask)
x = self.norm(x)
x = self.unembed.unembed(x)
return x, inference_params_dict_out
def stateful_forward(self, x, inference_params_dict=None):
for block_idx, block in enumerate(self.blocks):
block_name = "mha" if block_idx in self.config.attn_layer_idxs else "hyena"
inference_params = inference_params_dict[block_name]
x, _ = block(x, inference_params=inference_params)
return x, inference_params_dict
def stateless_forward(self, x, padding_mask=None):
if type(padding_mask) == torch.Tensor:
x = x * padding_mask[..., None]
for _, block in enumerate(self.blocks):
x, _ = block(x, inference_params=None, padding_mask=padding_mask)
return x, None
def initialize_inference_params(self):
print_rank_0("Initializing inference params...")
inference_params_dict = {
"mha": InferenceParams(
max_seqlen=self.config.get("max_seqlen", 8192),
max_batch_size=self.config.get("max_batch_size", 1),
seqlen_offset=0,
), | "hyena": RecurrentInferenceParams( | 1 | 2023-11-21 15:56:04+00:00 | 12k |
MobileTeleSystems/CoolGraph | cool_graph/runners.py | [
{
"identifier": "RawDataProcessor",
"path": "cool_graph/data/data_processor.py",
"snippet": "class RawDataProcessor:\n \"\"\"\n Preprocessing datasets.\n\n Args:\n groups_names (Dict[int, str]): Name of groups in nodes.\n group_names_node_features (Dict[str, List[str]]): Name of f... | import os
import pathlib
import hydra
import numpy as np
import optuna
import pandas as pd
import torch
from datetime import datetime
from itertools import product
from pathlib import Path
from typing import Dict, List, Literal, Optional
from hydra import (
compose,
core,
initialize,
initialize_config_dir,
initialize_config_module,
)
from omegaconf import DictConfig, OmegaConf
from optuna.trial import TrialState
from sklearn.model_selection import train_test_split
from torch_geometric.data import Data
from torch_geometric.loader import NeighborLoader, NeighborSampler
from tqdm import tqdm
from cool_graph.data import RawDataProcessor
from cool_graph.data.batch import get_auto_batch_size
from cool_graph.data.loaders import create_loaders
from cool_graph.logging import setup_mlflow_from_config
from cool_graph.parameter_search import (
model_params_to_trial_params,
sample_model_params,
)
from cool_graph.train import Trainer | 10,037 |
def create_cfg(config: str, overrides: List[str], path_base: str = "cfg") -> Dict:
assert path_base in ("cfg", "cwd")
core.global_hydra.GlobalHydra.instance().clear()
if os.path.isabs(config):
config_path = pathlib.Path(config).parent
else:
config_path = pathlib.Path(os.getcwd()) / pathlib.Path(config).parent
config_name = pathlib.Path(config).name.replace(".yaml", "")
initialize_config_dir(str(config_path), version_base=None)
cfg = compose(config_name=config_name, overrides=overrides)
return cfg
class ConfigRunner:
r"""Runner for cli mode. Using only in cli.
This class allows to load data + split data per batchs + split data per train/val + training.
See the config full.yaml in ./config for knowing what excactly using as data/logging/model_params/training/metrics.
You can use default params, but also you can change it.
Steps for changing confis:
- make get_config --configs path_where_you_need_configs (default: new path ./configs by itself)
"""
def __init__(self, config: Optional[DictConfig]) -> None:
cfg = OmegaConf.to_container(config, resolve=True)
self.cfg = cfg
self.target_names = cfg["training"]["targets"]
self.groups_names = cfg["data"]["groups_names"]
self.target_weights = cfg["training"]["loss"]["target_weights"]
self.read_edge_attr = cfg["data"].get("read_edge_attr", True)
self.batch_size = cfg["training"]["batch_size"]
self.group_mask_col = cfg["data"]["group_mask_col"]
self.label_mask_col = cfg["data"]["label_mask_col"]
self.label_cols = cfg["data"]["label_cols"]
self.label_index_col = cfg["data"]["label_index_col"]
self.edge_index_cols = cfg["data"]["edge_index_cols"]
self.num_neighbors = cfg["training"]["num_neighbors"]
self.features_edges_names = cfg["data"].get("features_edges")
self.group_names_node_features = cfg["data"]["features"]
self.train_paths = cfg["data"]["train"]
self.val_paths = cfg["data"]["validation"]
self.metrics = cfg["metrics"]
self.chkpt_dir = (
pathlib.Path(cfg["logging"]["checkpoint_dir"]) / str(datetime.now())[:19]
)
os.makedirs(self.chkpt_dir, exist_ok=True)
if self.cfg["logging"].get("use_mlflow", False):
|
def create_cfg(config: str, overrides: List[str], path_base: str = "cfg") -> Dict:
assert path_base in ("cfg", "cwd")
core.global_hydra.GlobalHydra.instance().clear()
if os.path.isabs(config):
config_path = pathlib.Path(config).parent
else:
config_path = pathlib.Path(os.getcwd()) / pathlib.Path(config).parent
config_name = pathlib.Path(config).name.replace(".yaml", "")
initialize_config_dir(str(config_path), version_base=None)
cfg = compose(config_name=config_name, overrides=overrides)
return cfg
class ConfigRunner:
r"""Runner for cli mode. Using only in cli.
This class allows to load data + split data per batchs + split data per train/val + training.
See the config full.yaml in ./config for knowing what excactly using as data/logging/model_params/training/metrics.
You can use default params, but also you can change it.
Steps for changing confis:
- make get_config --configs path_where_you_need_configs (default: new path ./configs by itself)
"""
def __init__(self, config: Optional[DictConfig]) -> None:
cfg = OmegaConf.to_container(config, resolve=True)
self.cfg = cfg
self.target_names = cfg["training"]["targets"]
self.groups_names = cfg["data"]["groups_names"]
self.target_weights = cfg["training"]["loss"]["target_weights"]
self.read_edge_attr = cfg["data"].get("read_edge_attr", True)
self.batch_size = cfg["training"]["batch_size"]
self.group_mask_col = cfg["data"]["group_mask_col"]
self.label_mask_col = cfg["data"]["label_mask_col"]
self.label_cols = cfg["data"]["label_cols"]
self.label_index_col = cfg["data"]["label_index_col"]
self.edge_index_cols = cfg["data"]["edge_index_cols"]
self.num_neighbors = cfg["training"]["num_neighbors"]
self.features_edges_names = cfg["data"].get("features_edges")
self.group_names_node_features = cfg["data"]["features"]
self.train_paths = cfg["data"]["train"]
self.val_paths = cfg["data"]["validation"]
self.metrics = cfg["metrics"]
self.chkpt_dir = (
pathlib.Path(cfg["logging"]["checkpoint_dir"]) / str(datetime.now())[:19]
)
os.makedirs(self.chkpt_dir, exist_ok=True)
if self.cfg["logging"].get("use_mlflow", False): | setup_mlflow_from_config(cfg["logging"]["mlflow"]) | 3 | 2023-11-22 09:44:16+00:00 | 12k |
HeliosZhao/Animate124 | dnerf/provider.py | [
{
"identifier": "get_rays",
"path": "nerf/utils.py",
"snippet": "@torch.cuda.amp.autocast(enabled=False)\ndef get_rays(poses, intrinsics, H, W, N=-1, error_map=None):\n ''' get rays\n Args:\n poses: [B, 4, 4], cam2world\n intrinsics: [4]\n H, W, N: int\n error_map: [B, ... | import random
import numpy as np
import math
import torch
import torch.nn.functional as F
import ipdb
import logging
from scipy.spatial.transform import Slerp, Rotation
from torch.utils.data import DataLoader
from nerf.utils import get_rays, safe_normalize
from nerf.provider import NeRFDataset, visualize_poses, DIR_COLORS, get_view_direction, rand_poses, circle_poses, generate_grid_points | 7,229 | ## scale delta will make the camera not exceed the range, also, it cannot across the range
# for example, phi from -pi/4 to pi/4 is a reasonable motion but scale delta will make it impossible
d_thetas = d_thetas.clamp(theta_range[0]-thetas, theta_range[1]-thetas) # d_theta + theta in range [theta_range[0], theta_range[1]]
d_phis = d_phis.clamp(phi_range[0]-init_phis, phi_range[1]-init_phis) # d_phi + init_phi in range [phi_range[0], phi_range[1]] # init phi is before convert to 0-2pi
##
num_frames = opt.num_frames
scale = torch.arange(num_frames, device=device) / num_frames # 0,1/f, ... f-1/f, F
thetas_dyn = thetas + scale * d_thetas # F
phis_dyn = init_phis + scale * d_phis # F
phis_dyn[phis_dyn < 0] += 2 * np.pi
assert thetas_dyn[0] == thetas[0] and phis_dyn[0] == init_phis[0]
centers = torch.stack([
radius * torch.sin(thetas_dyn) * torch.sin(phis_dyn),
radius * torch.cos(thetas_dyn),
radius * torch.sin(thetas_dyn) * torch.cos(phis_dyn),
], dim=-1) # [B, 3] # F,3
# lookat
forward_vector = safe_normalize(centers - targets)
up_vector = torch.FloatTensor([0, 1, 0]).to(device).unsqueeze(0).repeat(num_frames, 1)
right_vector = safe_normalize(torch.cross(forward_vector, up_vector, dim=-1))
if opt.jitter_pose:
up_noise = torch.randn_like(up_vector) * opt.jitter_up
else:
up_noise = 0
up_vector = safe_normalize(torch.cross(right_vector, forward_vector, dim=-1) + up_noise)
poses = torch.eye(4, dtype=torch.float, device=device).unsqueeze(0).repeat(num_frames, 1, 1)
poses[:, :3, :3] = torch.stack((right_vector, up_vector, forward_vector), dim=-1)
poses[:, :3, 3] = centers
if return_dirs:
dirs = get_view_direction(thetas_dyn, phis_dyn, angle_overhead, angle_front)
else:
dirs = None
# back to degree
thetas_dyn = thetas_dyn / np.pi * 180
phis_dyn = phis_dyn / np.pi * 180
radius = radius.repeat(num_frames)
return poses, dirs, thetas_dyn, phis_dyn, radius
class DNeRFDataset(NeRFDataset):
def __init__(self, opt, device, type='train', H=256, W=256, size=100):
super().__init__(opt, device, type, H, W, size)
self.num_frames = opt.num_frames
self.num_test_frames = opt.get("num_test_frames", self.num_frames)
self.dynamic_cam_rate = self.opt.dynamic_cam_rate
self.precision = opt.get('precision', 64)
self.zero_precision = opt.get('zero_precision', self.precision)
logger.info(f"Training dataset, random time sampling precision is {self.precision}, zero time sampling precision is {self.zero_precision}")
def get_default_view_data(self):
H = int(self.opt.known_view_scale * self.H)
W = int(self.opt.known_view_scale * self.W)
cx = H / 2
cy = W / 2
radii = torch.FloatTensor(self.opt.ref_radii).to(self.device)
thetas = torch.FloatTensor(self.opt.ref_polars).to(self.device)
phis = torch.FloatTensor(self.opt.ref_azimuths).to(self.device)
poses, dirs = circle_poses(self.device, radius=radii, theta=thetas, phi=phis, return_dirs=True, angle_overhead=self.opt.angle_overhead, angle_front=self.opt.angle_front)
fov = self.opt.default_fovy
focal = H / (2 * np.tan(np.deg2rad(fov) / 2))
intrinsics = np.array([focal, focal, cx, cy])
projection = torch.tensor([
[2*focal/W, 0, 0, 0],
[0, -2*focal/H, 0, 0],
[0, 0, -(self.far+self.near)/(self.far-self.near), -(2*self.far*self.near)/(self.far-self.near)],
[0, 0, -1, 0]
], dtype=torch.float32, device=self.device).unsqueeze(0).repeat(len(radii), 1, 1)
mvp = projection @ torch.inverse(poses) # [B, 4, 4]
# sample a low-resolution but full image
rays = get_rays(poses, intrinsics, H, W, -1)
if rays['rays_o'].size(0):
time = torch.FloatTensor([0]).reshape(rays['rays_o'].size(0), 1)
else:
time = None
data = {
'H': H,
'W': W,
'rays_o': rays['rays_o'],
'rays_d': rays['rays_d'],
'dir': dirs,
'mvp': mvp,
'time': time,
'polar': self.opt.ref_polars,
'azimuth': self.opt.ref_azimuths,
'radius': self.opt.ref_radii,
}
return data
def collate(self, index):
B = len(index)
dynamic_cam = False
start_from_zero = False
if self.training:
if np.random.random() < self.dynamic_cam_rate:
dynamic_cam = True
poses, dirs, thetas, phis, radius = rand_poses_trajectory(B, self.device, self.opt, radius_range=self.opt.radius_range, theta_range=self.opt.theta_range, phi_range=self.opt.phi_range, return_dirs=True, angle_overhead=self.opt.angle_overhead, angle_front=self.opt.angle_front, uniform_sphere_rate=self.opt.uniform_sphere_rate)
## poses F,4,4
else:
# random pose on the fly, size 1,4,4
|
logger = logging.getLogger(__name__)
def rand_poses_trajectory(size, device, opt, radius_range=[1, 1.5], theta_range=[0, 120], phi_range=[0, 360], return_dirs=False, angle_overhead=30, angle_front=60, uniform_sphere_rate=0.5, static_view_rate=0.):
''' generate random poses from an orbit camera
Args:
size: batch size of generated poses.
device: where to allocate the output.
radius: camera radius [1.8,1.8]
theta_range: [min, max], should be in [45, 135]
phi_range: [min, max], should be in [-180, 180]
Return:
poses: [size, 4, 4]
'''
assert size == 1 and not opt.jitter_pose
theta_range = np.array(theta_range) / 180 * np.pi # -pi/4 ~ 3pi/4
phi_range = np.array(phi_range) / 180 * np.pi # -pi ~ pi
angle_overhead = angle_overhead / 180 * np.pi # pi/6
angle_front = angle_front / 180 * np.pi # pi/3
radius = torch.rand(size, device=device) * (radius_range[1] - radius_range[0]) + radius_range[0]
if random.random() < uniform_sphere_rate: # 0.5
unit_centers = F.normalize(
torch.stack([
(torch.rand(size, device=device) - 0.5) * 2.0,
torch.rand(size, device=device),
(torch.rand(size, device=device) - 0.5) * 2.0,
], dim=-1), p=2, dim=1
)
thetas = torch.acos(unit_centers[:,1])
phis = torch.atan2(unit_centers[:,0], unit_centers[:,2])
init_phis = phis # init phi can be smaller than 0
phis[phis < 0] += 2 * np.pi
centers = unit_centers * radius.unsqueeze(-1)
else:
thetas = torch.rand(size, device=device) * (theta_range[1] - theta_range[0]) + theta_range[0] # 1
phis = torch.rand(size, device=device) * (phi_range[1] - phi_range[0]) + phi_range[0]
init_phis = phis
phis[phis < 0] += 2 * np.pi
centers = torch.stack([
radius * torch.sin(thetas) * torch.sin(phis),
radius * torch.cos(thetas),
radius * torch.sin(thetas) * torch.cos(phis),
], dim=-1) # [B, 3]
targets = 0
# scale_delta = False
# ipdb.set_trace()
## delta thetas
d_theta_range = [-np.pi/4, np.pi/4]
d_phi_range = [-np.pi/2, np.pi/2]
d_thetas = torch.rand(size, device=device) * (d_theta_range[1] - d_theta_range[0]) + d_theta_range[0] # -np.pi/4, np.pi/4
d_phis = torch.rand(size, device=device) * (d_phi_range[1] - d_phi_range[0]) + d_phi_range[0] # -np.pi/2, np.pi/2
if opt.scale_delta:
## scale delta will make the camera not exceed the range, also, it cannot across the range
# for example, phi from -pi/4 to pi/4 is a reasonable motion but scale delta will make it impossible
d_thetas = d_thetas.clamp(theta_range[0]-thetas, theta_range[1]-thetas) # d_theta + theta in range [theta_range[0], theta_range[1]]
d_phis = d_phis.clamp(phi_range[0]-init_phis, phi_range[1]-init_phis) # d_phi + init_phi in range [phi_range[0], phi_range[1]] # init phi is before convert to 0-2pi
##
num_frames = opt.num_frames
scale = torch.arange(num_frames, device=device) / num_frames # 0,1/f, ... f-1/f, F
thetas_dyn = thetas + scale * d_thetas # F
phis_dyn = init_phis + scale * d_phis # F
phis_dyn[phis_dyn < 0] += 2 * np.pi
assert thetas_dyn[0] == thetas[0] and phis_dyn[0] == init_phis[0]
centers = torch.stack([
radius * torch.sin(thetas_dyn) * torch.sin(phis_dyn),
radius * torch.cos(thetas_dyn),
radius * torch.sin(thetas_dyn) * torch.cos(phis_dyn),
], dim=-1) # [B, 3] # F,3
# lookat
forward_vector = safe_normalize(centers - targets)
up_vector = torch.FloatTensor([0, 1, 0]).to(device).unsqueeze(0).repeat(num_frames, 1)
right_vector = safe_normalize(torch.cross(forward_vector, up_vector, dim=-1))
if opt.jitter_pose:
up_noise = torch.randn_like(up_vector) * opt.jitter_up
else:
up_noise = 0
up_vector = safe_normalize(torch.cross(right_vector, forward_vector, dim=-1) + up_noise)
poses = torch.eye(4, dtype=torch.float, device=device).unsqueeze(0).repeat(num_frames, 1, 1)
poses[:, :3, :3] = torch.stack((right_vector, up_vector, forward_vector), dim=-1)
poses[:, :3, 3] = centers
if return_dirs:
dirs = get_view_direction(thetas_dyn, phis_dyn, angle_overhead, angle_front)
else:
dirs = None
# back to degree
thetas_dyn = thetas_dyn / np.pi * 180
phis_dyn = phis_dyn / np.pi * 180
radius = radius.repeat(num_frames)
return poses, dirs, thetas_dyn, phis_dyn, radius
class DNeRFDataset(NeRFDataset):
def __init__(self, opt, device, type='train', H=256, W=256, size=100):
super().__init__(opt, device, type, H, W, size)
self.num_frames = opt.num_frames
self.num_test_frames = opt.get("num_test_frames", self.num_frames)
self.dynamic_cam_rate = self.opt.dynamic_cam_rate
self.precision = opt.get('precision', 64)
self.zero_precision = opt.get('zero_precision', self.precision)
logger.info(f"Training dataset, random time sampling precision is {self.precision}, zero time sampling precision is {self.zero_precision}")
def get_default_view_data(self):
H = int(self.opt.known_view_scale * self.H)
W = int(self.opt.known_view_scale * self.W)
cx = H / 2
cy = W / 2
radii = torch.FloatTensor(self.opt.ref_radii).to(self.device)
thetas = torch.FloatTensor(self.opt.ref_polars).to(self.device)
phis = torch.FloatTensor(self.opt.ref_azimuths).to(self.device)
poses, dirs = circle_poses(self.device, radius=radii, theta=thetas, phi=phis, return_dirs=True, angle_overhead=self.opt.angle_overhead, angle_front=self.opt.angle_front)
fov = self.opt.default_fovy
focal = H / (2 * np.tan(np.deg2rad(fov) / 2))
intrinsics = np.array([focal, focal, cx, cy])
projection = torch.tensor([
[2*focal/W, 0, 0, 0],
[0, -2*focal/H, 0, 0],
[0, 0, -(self.far+self.near)/(self.far-self.near), -(2*self.far*self.near)/(self.far-self.near)],
[0, 0, -1, 0]
], dtype=torch.float32, device=self.device).unsqueeze(0).repeat(len(radii), 1, 1)
mvp = projection @ torch.inverse(poses) # [B, 4, 4]
# sample a low-resolution but full image
rays = get_rays(poses, intrinsics, H, W, -1)
if rays['rays_o'].size(0):
time = torch.FloatTensor([0]).reshape(rays['rays_o'].size(0), 1)
else:
time = None
data = {
'H': H,
'W': W,
'rays_o': rays['rays_o'],
'rays_d': rays['rays_d'],
'dir': dirs,
'mvp': mvp,
'time': time,
'polar': self.opt.ref_polars,
'azimuth': self.opt.ref_azimuths,
'radius': self.opt.ref_radii,
}
return data
def collate(self, index):
B = len(index)
dynamic_cam = False
start_from_zero = False
if self.training:
if np.random.random() < self.dynamic_cam_rate:
dynamic_cam = True
poses, dirs, thetas, phis, radius = rand_poses_trajectory(B, self.device, self.opt, radius_range=self.opt.radius_range, theta_range=self.opt.theta_range, phi_range=self.opt.phi_range, return_dirs=True, angle_overhead=self.opt.angle_overhead, angle_front=self.opt.angle_front, uniform_sphere_rate=self.opt.uniform_sphere_rate)
## poses F,4,4
else:
# random pose on the fly, size 1,4,4 | poses, dirs, thetas, phis, radius = rand_poses(B, self.device, self.opt, radius_range=self.opt.radius_range, theta_range=self.opt.theta_range, phi_range=self.opt.phi_range, return_dirs=True, angle_overhead=self.opt.angle_overhead, angle_front=self.opt.angle_front, uniform_sphere_rate=self.opt.uniform_sphere_rate) | 6 | 2023-11-23 10:34:08+00:00 | 12k |
tingxueronghua/ChartLlama-code | llava/model/language_model/mpt/modeling_mpt.py | [
{
"identifier": "attn_bias_shape",
"path": "llava/model/language_model/mpt/attention.py",
"snippet": "def attn_bias_shape(attn_impl, n_heads, seq_len, alibi, prefix_lm, causal, use_sequence_id):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if al... | import math
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List, Optional, Tuple, Union
from transformers import PreTrainedModel, PreTrainedTokenizer, PreTrainedTokenizerFast
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from .attention import attn_bias_shape, build_attn_bias
from .blocks import MPTBlock
from .custom_embedding import SharedEmbedding
from .norm import NORM_CLASS_REGISTRY
from .configuration_mpt import MPTConfig
from .adapt_tokenizer import AutoTokenizerForMOD, adapt_tokenizer_for_denoising
from .hf_prefixlm_converter import add_bidirectional_mask_if_missing, convert_hf_causal_lm_to_prefix_lm
from .meta_init_context import init_empty_weights
from .param_init_fns import MODEL_INIT_REGISTRY, generic_param_init_fn_
from .flash_attn_triton import flash_attn_func | 7,343 | """A simple, flexible implementation of a GPT model.
Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
try:
except:
pass
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
class MPTPreTrainedModel(PreTrainedModel):
config_class = MPTConfig
base_model_prefix = 'model'
_no_split_modules = ['MPTBlock']
class MPTModel(MPTPreTrainedModel):
def __init__(self, config: MPTConfig):
config._validate_config()
super().__init__(config)
self.attn_impl = config.attn_config['attn_impl']
self.prefix_lm = config.attn_config['prefix_lm']
self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
self.alibi = config.attn_config['alibi']
self.alibi_bias_max = config.attn_config['alibi_bias_max']
if config.init_device == 'mixed':
if dist.get_local_rank() == 0:
config.init_device = 'cpu'
else:
config.init_device = 'meta'
if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys())
raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).')
norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
self.embedding_fraction = config.embedding_fraction
self.wte = SharedEmbedding(config.vocab_size, config.d_model, device=config.init_device)
if not self.alibi:
self.wpe = torch.nn.Embedding(config.max_seq_len, config.d_model, device=config.init_device)
self.emb_drop = nn.Dropout(config.emb_pdrop)
self.blocks = nn.ModuleList([MPTBlock(device=config.init_device, **config.to_dict()) for _ in range(config.n_layers)])
self.norm_f = norm_class(config.d_model, device=config.init_device)
if config.init_device != 'meta':
print(f'You are using config.init_device={config.init_device!r}, but you can also use config.init_device="meta" with Composer + FSDP for fast initialization.')
self.apply(self.param_init_fn)
self.is_causal = not self.prefix_lm
self._attn_bias_initialized = False
self.attn_bias = None
self.attn_bias_shape = attn_bias_shape(self.attn_impl, config.n_heads, config.max_seq_len, self.alibi, prefix_lm=self.prefix_lm, causal=self.is_causal, use_sequence_id=self.attn_uses_sequence_id)
if config.no_bias:
for module in self.modules():
if hasattr(module, 'bias') and isinstance(module.bias, nn.Parameter):
if config.verbose:
warnings.warn(f'Removing bias ({module.bias}) from {module}.')
module.register_parameter('bias', None)
if config.verbose and config.verbose > 2:
print(self)
if 'verbose' not in self.config.init_config:
self.config.init_config['verbose'] = self.config.verbose
if self.config.init_config['verbose'] > 1:
init_fn_name = self.config.init_config['name']
warnings.warn(f'Using {init_fn_name} initialization.')
self.gradient_checkpointing = False
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, value):
self.wte = value
@torch.no_grad()
def _attn_bias(self, device, dtype, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None):
if not self._attn_bias_initialized:
if self.attn_bias_shape:
self.attn_bias = torch.zeros(self.attn_bias_shape, device=device, dtype=dtype)
| """A simple, flexible implementation of a GPT model.
Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
try:
except:
pass
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
class MPTPreTrainedModel(PreTrainedModel):
config_class = MPTConfig
base_model_prefix = 'model'
_no_split_modules = ['MPTBlock']
class MPTModel(MPTPreTrainedModel):
def __init__(self, config: MPTConfig):
config._validate_config()
super().__init__(config)
self.attn_impl = config.attn_config['attn_impl']
self.prefix_lm = config.attn_config['prefix_lm']
self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
self.alibi = config.attn_config['alibi']
self.alibi_bias_max = config.attn_config['alibi_bias_max']
if config.init_device == 'mixed':
if dist.get_local_rank() == 0:
config.init_device = 'cpu'
else:
config.init_device = 'meta'
if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys())
raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).')
norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
self.embedding_fraction = config.embedding_fraction
self.wte = SharedEmbedding(config.vocab_size, config.d_model, device=config.init_device)
if not self.alibi:
self.wpe = torch.nn.Embedding(config.max_seq_len, config.d_model, device=config.init_device)
self.emb_drop = nn.Dropout(config.emb_pdrop)
self.blocks = nn.ModuleList([MPTBlock(device=config.init_device, **config.to_dict()) for _ in range(config.n_layers)])
self.norm_f = norm_class(config.d_model, device=config.init_device)
if config.init_device != 'meta':
print(f'You are using config.init_device={config.init_device!r}, but you can also use config.init_device="meta" with Composer + FSDP for fast initialization.')
self.apply(self.param_init_fn)
self.is_causal = not self.prefix_lm
self._attn_bias_initialized = False
self.attn_bias = None
self.attn_bias_shape = attn_bias_shape(self.attn_impl, config.n_heads, config.max_seq_len, self.alibi, prefix_lm=self.prefix_lm, causal=self.is_causal, use_sequence_id=self.attn_uses_sequence_id)
if config.no_bias:
for module in self.modules():
if hasattr(module, 'bias') and isinstance(module.bias, nn.Parameter):
if config.verbose:
warnings.warn(f'Removing bias ({module.bias}) from {module}.')
module.register_parameter('bias', None)
if config.verbose and config.verbose > 2:
print(self)
if 'verbose' not in self.config.init_config:
self.config.init_config['verbose'] = self.config.verbose
if self.config.init_config['verbose'] > 1:
init_fn_name = self.config.init_config['name']
warnings.warn(f'Using {init_fn_name} initialization.')
self.gradient_checkpointing = False
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, value):
self.wte = value
@torch.no_grad()
def _attn_bias(self, device, dtype, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None):
if not self._attn_bias_initialized:
if self.attn_bias_shape:
self.attn_bias = torch.zeros(self.attn_bias_shape, device=device, dtype=dtype) | self.attn_bias = build_attn_bias(self.attn_impl, self.attn_bias, self.config.n_heads, self.config.max_seq_len, causal=self.is_causal, alibi=self.alibi, alibi_bias_max=self.alibi_bias_max) | 1 | 2023-11-26 17:26:00+00:00 | 12k |
eliphatfs/calibur | calibur/ndarray_extension.py | [
{
"identifier": "container_catamorphism",
"path": "calibur/generic_utils.py",
"snippet": "def container_catamorphism(data, func):\n \"\"\"\n Transforms leaf elements in ``list``, ``dict``, ``tuple``, ``set`` with ``func``, aka. *tree-map*.\n Nested containers are also supported.\n \"\"\"\n ... | import numpy
from typing import Union, TypeVar
from functools import wraps
from .generic_utils import container_catamorphism, type_match | 7,937 | brag: "GraphicsNDArray" = qattr((..., [2, 0, 3, 1]))
brab: "GraphicsNDArray" = qattr((..., [2, 0, 3, 2]))
braa: "GraphicsNDArray" = qattr((..., [2, 0, 3, 3]))
bgrr: "GraphicsNDArray" = qattr((..., [2, 1, 0, 0]))
bgrg: "GraphicsNDArray" = qattr((..., [2, 1, 0, 1]))
bgrb: "GraphicsNDArray" = qattr((..., [2, 1, 0, 2]))
bgra: "GraphicsNDArray" = qattr((..., [2, 1, 0, 3]))
bggr: "GraphicsNDArray" = qattr((..., [2, 1, 1, 0]))
bggg: "GraphicsNDArray" = qattr((..., [2, 1, 1, 1]))
bggb: "GraphicsNDArray" = qattr((..., [2, 1, 1, 2]))
bgga: "GraphicsNDArray" = qattr((..., [2, 1, 1, 3]))
bgbr: "GraphicsNDArray" = qattr((..., [2, 1, 2, 0]))
bgbg: "GraphicsNDArray" = qattr((..., [2, 1, 2, 1]))
bgbb: "GraphicsNDArray" = qattr((..., [2, 1, 2, 2]))
bgba: "GraphicsNDArray" = qattr((..., [2, 1, 2, 3]))
bgar: "GraphicsNDArray" = qattr((..., [2, 1, 3, 0]))
bgag: "GraphicsNDArray" = qattr((..., [2, 1, 3, 1]))
bgab: "GraphicsNDArray" = qattr((..., [2, 1, 3, 2]))
bgaa: "GraphicsNDArray" = qattr((..., [2, 1, 3, 3]))
bbrr: "GraphicsNDArray" = qattr((..., [2, 2, 0, 0]))
bbrg: "GraphicsNDArray" = qattr((..., [2, 2, 0, 1]))
bbrb: "GraphicsNDArray" = qattr((..., [2, 2, 0, 2]))
bbra: "GraphicsNDArray" = qattr((..., [2, 2, 0, 3]))
bbgr: "GraphicsNDArray" = qattr((..., [2, 2, 1, 0]))
bbgg: "GraphicsNDArray" = qattr((..., [2, 2, 1, 1]))
bbgb: "GraphicsNDArray" = qattr((..., [2, 2, 1, 2]))
bbga: "GraphicsNDArray" = qattr((..., [2, 2, 1, 3]))
bbbr: "GraphicsNDArray" = qattr((..., [2, 2, 2, 0]))
bbbg: "GraphicsNDArray" = qattr((..., [2, 2, 2, 1]))
bbbb: "GraphicsNDArray" = qattr((..., [2, 2, 2, 2]))
bbba: "GraphicsNDArray" = qattr((..., [2, 2, 2, 3]))
bbar: "GraphicsNDArray" = qattr((..., [2, 2, 3, 0]))
bbag: "GraphicsNDArray" = qattr((..., [2, 2, 3, 1]))
bbab: "GraphicsNDArray" = qattr((..., [2, 2, 3, 2]))
bbaa: "GraphicsNDArray" = qattr((..., [2, 2, 3, 3]))
barr: "GraphicsNDArray" = qattr((..., [2, 3, 0, 0]))
barg: "GraphicsNDArray" = qattr((..., [2, 3, 0, 1]))
barb: "GraphicsNDArray" = qattr((..., [2, 3, 0, 2]))
bara: "GraphicsNDArray" = qattr((..., [2, 3, 0, 3]))
bagr: "GraphicsNDArray" = qattr((..., [2, 3, 1, 0]))
bagg: "GraphicsNDArray" = qattr((..., [2, 3, 1, 1]))
bagb: "GraphicsNDArray" = qattr((..., [2, 3, 1, 2]))
baga: "GraphicsNDArray" = qattr((..., [2, 3, 1, 3]))
babr: "GraphicsNDArray" = qattr((..., [2, 3, 2, 0]))
babg: "GraphicsNDArray" = qattr((..., [2, 3, 2, 1]))
babb: "GraphicsNDArray" = qattr((..., [2, 3, 2, 2]))
baba: "GraphicsNDArray" = qattr((..., [2, 3, 2, 3]))
baar: "GraphicsNDArray" = qattr((..., [2, 3, 3, 0]))
baag: "GraphicsNDArray" = qattr((..., [2, 3, 3, 1]))
baab: "GraphicsNDArray" = qattr((..., [2, 3, 3, 2]))
baaa: "GraphicsNDArray" = qattr((..., [2, 3, 3, 3]))
arrr: "GraphicsNDArray" = qattr((..., [3, 0, 0, 0]))
arrg: "GraphicsNDArray" = qattr((..., [3, 0, 0, 1]))
arrb: "GraphicsNDArray" = qattr((..., [3, 0, 0, 2]))
arra: "GraphicsNDArray" = qattr((..., [3, 0, 0, 3]))
argr: "GraphicsNDArray" = qattr((..., [3, 0, 1, 0]))
argg: "GraphicsNDArray" = qattr((..., [3, 0, 1, 1]))
argb: "GraphicsNDArray" = qattr((..., [3, 0, 1, 2]))
arga: "GraphicsNDArray" = qattr((..., [3, 0, 1, 3]))
arbr: "GraphicsNDArray" = qattr((..., [3, 0, 2, 0]))
arbg: "GraphicsNDArray" = qattr((..., [3, 0, 2, 1]))
arbb: "GraphicsNDArray" = qattr((..., [3, 0, 2, 2]))
arba: "GraphicsNDArray" = qattr((..., [3, 0, 2, 3]))
arar: "GraphicsNDArray" = qattr((..., [3, 0, 3, 0]))
arag: "GraphicsNDArray" = qattr((..., [3, 0, 3, 1]))
arab: "GraphicsNDArray" = qattr((..., [3, 0, 3, 2]))
araa: "GraphicsNDArray" = qattr((..., [3, 0, 3, 3]))
agrr: "GraphicsNDArray" = qattr((..., [3, 1, 0, 0]))
agrg: "GraphicsNDArray" = qattr((..., [3, 1, 0, 1]))
agrb: "GraphicsNDArray" = qattr((..., [3, 1, 0, 2]))
agra: "GraphicsNDArray" = qattr((..., [3, 1, 0, 3]))
aggr: "GraphicsNDArray" = qattr((..., [3, 1, 1, 0]))
aggg: "GraphicsNDArray" = qattr((..., [3, 1, 1, 1]))
aggb: "GraphicsNDArray" = qattr((..., [3, 1, 1, 2]))
agga: "GraphicsNDArray" = qattr((..., [3, 1, 1, 3]))
agbr: "GraphicsNDArray" = qattr((..., [3, 1, 2, 0]))
agbg: "GraphicsNDArray" = qattr((..., [3, 1, 2, 1]))
agbb: "GraphicsNDArray" = qattr((..., [3, 1, 2, 2]))
agba: "GraphicsNDArray" = qattr((..., [3, 1, 2, 3]))
agar: "GraphicsNDArray" = qattr((..., [3, 1, 3, 0]))
agag: "GraphicsNDArray" = qattr((..., [3, 1, 3, 1]))
agab: "GraphicsNDArray" = qattr((..., [3, 1, 3, 2]))
agaa: "GraphicsNDArray" = qattr((..., [3, 1, 3, 3]))
abrr: "GraphicsNDArray" = qattr((..., [3, 2, 0, 0]))
abrg: "GraphicsNDArray" = qattr((..., [3, 2, 0, 1]))
abrb: "GraphicsNDArray" = qattr((..., [3, 2, 0, 2]))
abra: "GraphicsNDArray" = qattr((..., [3, 2, 0, 3]))
abgr: "GraphicsNDArray" = qattr((..., [3, 2, 1, 0]))
abgg: "GraphicsNDArray" = qattr((..., [3, 2, 1, 1]))
abgb: "GraphicsNDArray" = qattr((..., [3, 2, 1, 2]))
abga: "GraphicsNDArray" = qattr((..., [3, 2, 1, 3]))
abbr: "GraphicsNDArray" = qattr((..., [3, 2, 2, 0]))
abbg: "GraphicsNDArray" = qattr((..., [3, 2, 2, 1]))
abbb: "GraphicsNDArray" = qattr((..., [3, 2, 2, 2]))
abba: "GraphicsNDArray" = qattr((..., [3, 2, 2, 3]))
abar: "GraphicsNDArray" = qattr((..., [3, 2, 3, 0]))
abag: "GraphicsNDArray" = qattr((..., [3, 2, 3, 1]))
abab: "GraphicsNDArray" = qattr((..., [3, 2, 3, 2]))
abaa: "GraphicsNDArray" = qattr((..., [3, 2, 3, 3]))
aarr: "GraphicsNDArray" = qattr((..., [3, 3, 0, 0]))
aarg: "GraphicsNDArray" = qattr((..., [3, 3, 0, 1]))
aarb: "GraphicsNDArray" = qattr((..., [3, 3, 0, 2]))
aara: "GraphicsNDArray" = qattr((..., [3, 3, 0, 3]))
aagr: "GraphicsNDArray" = qattr((..., [3, 3, 1, 0]))
aagg: "GraphicsNDArray" = qattr((..., [3, 3, 1, 1]))
aagb: "GraphicsNDArray" = qattr((..., [3, 3, 1, 2]))
aaga: "GraphicsNDArray" = qattr((..., [3, 3, 1, 3]))
aabr: "GraphicsNDArray" = qattr((..., [3, 3, 2, 0]))
aabg: "GraphicsNDArray" = qattr((..., [3, 3, 2, 1]))
aabb: "GraphicsNDArray" = qattr((..., [3, 3, 2, 2]))
aaba: "GraphicsNDArray" = qattr((..., [3, 3, 2, 3]))
aaar: "GraphicsNDArray" = qattr((..., [3, 3, 3, 0]))
aaag: "GraphicsNDArray" = qattr((..., [3, 3, 3, 1]))
aaab: "GraphicsNDArray" = qattr((..., [3, 3, 3, 2]))
aaaa: "GraphicsNDArray" = qattr((..., [3, 3, 3, 3]))
NDArray = Union[numpy.ndarray, GraphicsNDArray]
T = TypeVar("T")
|
USE_QUICK_ATTRIBUTES = True
class GraphicsNDArray(numpy.ndarray):
"""
A subclass of ``numpy.ndarray``.
It adds GLSL-like access to components,
e.g. ``a.xyz / a.w`` is a shorthand for ``a[..., [0, 1, 2]] / a[..., [3]]``.
Note that single-component results retains a singleton ``1`` dimension.
"""
def __new__(cls, input_array):
return numpy.asarray(input_array).view(cls)
def __array_function__(self, func, types, args, kwargs):
result = super().__array_function__(func, types, args, kwargs)
if isinstance(result, numpy.ndarray):
# print("__array_function__ cast")
return GraphicsNDArray(result)
return result
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): # this method is called whenever you use a ufunc
f = {
"reduce": ufunc.reduce,
"accumulate": ufunc.accumulate,
"reduceat": ufunc.reduceat,
"outer": ufunc.outer,
"at": ufunc.at,
"__call__": ufunc,
}
# print("__array_ufunc__ cast")
output = GraphicsNDArray(f[method](*(numpy.asarray(i) for i in inputs), **kwargs))
return output
def qattr(x: tuple):
return property(lambda self: self[x])
if USE_QUICK_ATTRIBUTES:
x: "GraphicsNDArray" = qattr((..., [0]))
y: "GraphicsNDArray" = qattr((..., [1]))
z: "GraphicsNDArray" = qattr((..., [2]))
w: "GraphicsNDArray" = qattr((..., [3]))
xx: "GraphicsNDArray" = qattr((..., [0, 0]))
xy: "GraphicsNDArray" = qattr((..., [0, 1]))
xz: "GraphicsNDArray" = qattr((..., [0, 2]))
xw: "GraphicsNDArray" = qattr((..., [0, 3]))
yx: "GraphicsNDArray" = qattr((..., [1, 0]))
yy: "GraphicsNDArray" = qattr((..., [1, 1]))
yz: "GraphicsNDArray" = qattr((..., [1, 2]))
yw: "GraphicsNDArray" = qattr((..., [1, 3]))
zx: "GraphicsNDArray" = qattr((..., [2, 0]))
zy: "GraphicsNDArray" = qattr((..., [2, 1]))
zz: "GraphicsNDArray" = qattr((..., [2, 2]))
zw: "GraphicsNDArray" = qattr((..., [2, 3]))
wx: "GraphicsNDArray" = qattr((..., [3, 0]))
wy: "GraphicsNDArray" = qattr((..., [3, 1]))
wz: "GraphicsNDArray" = qattr((..., [3, 2]))
ww: "GraphicsNDArray" = qattr((..., [3, 3]))
xxx: "GraphicsNDArray" = qattr((..., [0, 0, 0]))
xxy: "GraphicsNDArray" = qattr((..., [0, 0, 1]))
xxz: "GraphicsNDArray" = qattr((..., [0, 0, 2]))
xxw: "GraphicsNDArray" = qattr((..., [0, 0, 3]))
xyx: "GraphicsNDArray" = qattr((..., [0, 1, 0]))
xyy: "GraphicsNDArray" = qattr((..., [0, 1, 1]))
xyz: "GraphicsNDArray" = qattr((..., [0, 1, 2]))
xyw: "GraphicsNDArray" = qattr((..., [0, 1, 3]))
xzx: "GraphicsNDArray" = qattr((..., [0, 2, 0]))
xzy: "GraphicsNDArray" = qattr((..., [0, 2, 1]))
xzz: "GraphicsNDArray" = qattr((..., [0, 2, 2]))
xzw: "GraphicsNDArray" = qattr((..., [0, 2, 3]))
xwx: "GraphicsNDArray" = qattr((..., [0, 3, 0]))
xwy: "GraphicsNDArray" = qattr((..., [0, 3, 1]))
xwz: "GraphicsNDArray" = qattr((..., [0, 3, 2]))
xww: "GraphicsNDArray" = qattr((..., [0, 3, 3]))
yxx: "GraphicsNDArray" = qattr((..., [1, 0, 0]))
yxy: "GraphicsNDArray" = qattr((..., [1, 0, 1]))
yxz: "GraphicsNDArray" = qattr((..., [1, 0, 2]))
yxw: "GraphicsNDArray" = qattr((..., [1, 0, 3]))
yyx: "GraphicsNDArray" = qattr((..., [1, 1, 0]))
yyy: "GraphicsNDArray" = qattr((..., [1, 1, 1]))
yyz: "GraphicsNDArray" = qattr((..., [1, 1, 2]))
yyw: "GraphicsNDArray" = qattr((..., [1, 1, 3]))
yzx: "GraphicsNDArray" = qattr((..., [1, 2, 0]))
yzy: "GraphicsNDArray" = qattr((..., [1, 2, 1]))
yzz: "GraphicsNDArray" = qattr((..., [1, 2, 2]))
yzw: "GraphicsNDArray" = qattr((..., [1, 2, 3]))
ywx: "GraphicsNDArray" = qattr((..., [1, 3, 0]))
ywy: "GraphicsNDArray" = qattr((..., [1, 3, 1]))
ywz: "GraphicsNDArray" = qattr((..., [1, 3, 2]))
yww: "GraphicsNDArray" = qattr((..., [1, 3, 3]))
zxx: "GraphicsNDArray" = qattr((..., [2, 0, 0]))
zxy: "GraphicsNDArray" = qattr((..., [2, 0, 1]))
zxz: "GraphicsNDArray" = qattr((..., [2, 0, 2]))
zxw: "GraphicsNDArray" = qattr((..., [2, 0, 3]))
zyx: "GraphicsNDArray" = qattr((..., [2, 1, 0]))
zyy: "GraphicsNDArray" = qattr((..., [2, 1, 1]))
zyz: "GraphicsNDArray" = qattr((..., [2, 1, 2]))
zyw: "GraphicsNDArray" = qattr((..., [2, 1, 3]))
zzx: "GraphicsNDArray" = qattr((..., [2, 2, 0]))
zzy: "GraphicsNDArray" = qattr((..., [2, 2, 1]))
zzz: "GraphicsNDArray" = qattr((..., [2, 2, 2]))
zzw: "GraphicsNDArray" = qattr((..., [2, 2, 3]))
zwx: "GraphicsNDArray" = qattr((..., [2, 3, 0]))
zwy: "GraphicsNDArray" = qattr((..., [2, 3, 1]))
zwz: "GraphicsNDArray" = qattr((..., [2, 3, 2]))
zww: "GraphicsNDArray" = qattr((..., [2, 3, 3]))
wxx: "GraphicsNDArray" = qattr((..., [3, 0, 0]))
wxy: "GraphicsNDArray" = qattr((..., [3, 0, 1]))
wxz: "GraphicsNDArray" = qattr((..., [3, 0, 2]))
wxw: "GraphicsNDArray" = qattr((..., [3, 0, 3]))
wyx: "GraphicsNDArray" = qattr((..., [3, 1, 0]))
wyy: "GraphicsNDArray" = qattr((..., [3, 1, 1]))
wyz: "GraphicsNDArray" = qattr((..., [3, 1, 2]))
wyw: "GraphicsNDArray" = qattr((..., [3, 1, 3]))
wzx: "GraphicsNDArray" = qattr((..., [3, 2, 0]))
wzy: "GraphicsNDArray" = qattr((..., [3, 2, 1]))
wzz: "GraphicsNDArray" = qattr((..., [3, 2, 2]))
wzw: "GraphicsNDArray" = qattr((..., [3, 2, 3]))
wwx: "GraphicsNDArray" = qattr((..., [3, 3, 0]))
wwy: "GraphicsNDArray" = qattr((..., [3, 3, 1]))
wwz: "GraphicsNDArray" = qattr((..., [3, 3, 2]))
www: "GraphicsNDArray" = qattr((..., [3, 3, 3]))
xxxx: "GraphicsNDArray" = qattr((..., [0, 0, 0, 0]))
xxxy: "GraphicsNDArray" = qattr((..., [0, 0, 0, 1]))
xxxz: "GraphicsNDArray" = qattr((..., [0, 0, 0, 2]))
xxxw: "GraphicsNDArray" = qattr((..., [0, 0, 0, 3]))
xxyx: "GraphicsNDArray" = qattr((..., [0, 0, 1, 0]))
xxyy: "GraphicsNDArray" = qattr((..., [0, 0, 1, 1]))
xxyz: "GraphicsNDArray" = qattr((..., [0, 0, 1, 2]))
xxyw: "GraphicsNDArray" = qattr((..., [0, 0, 1, 3]))
xxzx: "GraphicsNDArray" = qattr((..., [0, 0, 2, 0]))
xxzy: "GraphicsNDArray" = qattr((..., [0, 0, 2, 1]))
xxzz: "GraphicsNDArray" = qattr((..., [0, 0, 2, 2]))
xxzw: "GraphicsNDArray" = qattr((..., [0, 0, 2, 3]))
xxwx: "GraphicsNDArray" = qattr((..., [0, 0, 3, 0]))
xxwy: "GraphicsNDArray" = qattr((..., [0, 0, 3, 1]))
xxwz: "GraphicsNDArray" = qattr((..., [0, 0, 3, 2]))
xxww: "GraphicsNDArray" = qattr((..., [0, 0, 3, 3]))
xyxx: "GraphicsNDArray" = qattr((..., [0, 1, 0, 0]))
xyxy: "GraphicsNDArray" = qattr((..., [0, 1, 0, 1]))
xyxz: "GraphicsNDArray" = qattr((..., [0, 1, 0, 2]))
xyxw: "GraphicsNDArray" = qattr((..., [0, 1, 0, 3]))
xyyx: "GraphicsNDArray" = qattr((..., [0, 1, 1, 0]))
xyyy: "GraphicsNDArray" = qattr((..., [0, 1, 1, 1]))
xyyz: "GraphicsNDArray" = qattr((..., [0, 1, 1, 2]))
xyyw: "GraphicsNDArray" = qattr((..., [0, 1, 1, 3]))
xyzx: "GraphicsNDArray" = qattr((..., [0, 1, 2, 0]))
xyzy: "GraphicsNDArray" = qattr((..., [0, 1, 2, 1]))
xyzz: "GraphicsNDArray" = qattr((..., [0, 1, 2, 2]))
xyzw: "GraphicsNDArray" = qattr((..., [0, 1, 2, 3]))
xywx: "GraphicsNDArray" = qattr((..., [0, 1, 3, 0]))
xywy: "GraphicsNDArray" = qattr((..., [0, 1, 3, 1]))
xywz: "GraphicsNDArray" = qattr((..., [0, 1, 3, 2]))
xyww: "GraphicsNDArray" = qattr((..., [0, 1, 3, 3]))
xzxx: "GraphicsNDArray" = qattr((..., [0, 2, 0, 0]))
xzxy: "GraphicsNDArray" = qattr((..., [0, 2, 0, 1]))
xzxz: "GraphicsNDArray" = qattr((..., [0, 2, 0, 2]))
xzxw: "GraphicsNDArray" = qattr((..., [0, 2, 0, 3]))
xzyx: "GraphicsNDArray" = qattr((..., [0, 2, 1, 0]))
xzyy: "GraphicsNDArray" = qattr((..., [0, 2, 1, 1]))
xzyz: "GraphicsNDArray" = qattr((..., [0, 2, 1, 2]))
xzyw: "GraphicsNDArray" = qattr((..., [0, 2, 1, 3]))
xzzx: "GraphicsNDArray" = qattr((..., [0, 2, 2, 0]))
xzzy: "GraphicsNDArray" = qattr((..., [0, 2, 2, 1]))
xzzz: "GraphicsNDArray" = qattr((..., [0, 2, 2, 2]))
xzzw: "GraphicsNDArray" = qattr((..., [0, 2, 2, 3]))
xzwx: "GraphicsNDArray" = qattr((..., [0, 2, 3, 0]))
xzwy: "GraphicsNDArray" = qattr((..., [0, 2, 3, 1]))
xzwz: "GraphicsNDArray" = qattr((..., [0, 2, 3, 2]))
xzww: "GraphicsNDArray" = qattr((..., [0, 2, 3, 3]))
xwxx: "GraphicsNDArray" = qattr((..., [0, 3, 0, 0]))
xwxy: "GraphicsNDArray" = qattr((..., [0, 3, 0, 1]))
xwxz: "GraphicsNDArray" = qattr((..., [0, 3, 0, 2]))
xwxw: "GraphicsNDArray" = qattr((..., [0, 3, 0, 3]))
xwyx: "GraphicsNDArray" = qattr((..., [0, 3, 1, 0]))
xwyy: "GraphicsNDArray" = qattr((..., [0, 3, 1, 1]))
xwyz: "GraphicsNDArray" = qattr((..., [0, 3, 1, 2]))
xwyw: "GraphicsNDArray" = qattr((..., [0, 3, 1, 3]))
xwzx: "GraphicsNDArray" = qattr((..., [0, 3, 2, 0]))
xwzy: "GraphicsNDArray" = qattr((..., [0, 3, 2, 1]))
xwzz: "GraphicsNDArray" = qattr((..., [0, 3, 2, 2]))
xwzw: "GraphicsNDArray" = qattr((..., [0, 3, 2, 3]))
xwwx: "GraphicsNDArray" = qattr((..., [0, 3, 3, 0]))
xwwy: "GraphicsNDArray" = qattr((..., [0, 3, 3, 1]))
xwwz: "GraphicsNDArray" = qattr((..., [0, 3, 3, 2]))
xwww: "GraphicsNDArray" = qattr((..., [0, 3, 3, 3]))
yxxx: "GraphicsNDArray" = qattr((..., [1, 0, 0, 0]))
yxxy: "GraphicsNDArray" = qattr((..., [1, 0, 0, 1]))
yxxz: "GraphicsNDArray" = qattr((..., [1, 0, 0, 2]))
yxxw: "GraphicsNDArray" = qattr((..., [1, 0, 0, 3]))
yxyx: "GraphicsNDArray" = qattr((..., [1, 0, 1, 0]))
yxyy: "GraphicsNDArray" = qattr((..., [1, 0, 1, 1]))
yxyz: "GraphicsNDArray" = qattr((..., [1, 0, 1, 2]))
yxyw: "GraphicsNDArray" = qattr((..., [1, 0, 1, 3]))
yxzx: "GraphicsNDArray" = qattr((..., [1, 0, 2, 0]))
yxzy: "GraphicsNDArray" = qattr((..., [1, 0, 2, 1]))
yxzz: "GraphicsNDArray" = qattr((..., [1, 0, 2, 2]))
yxzw: "GraphicsNDArray" = qattr((..., [1, 0, 2, 3]))
yxwx: "GraphicsNDArray" = qattr((..., [1, 0, 3, 0]))
yxwy: "GraphicsNDArray" = qattr((..., [1, 0, 3, 1]))
yxwz: "GraphicsNDArray" = qattr((..., [1, 0, 3, 2]))
yxww: "GraphicsNDArray" = qattr((..., [1, 0, 3, 3]))
yyxx: "GraphicsNDArray" = qattr((..., [1, 1, 0, 0]))
yyxy: "GraphicsNDArray" = qattr((..., [1, 1, 0, 1]))
yyxz: "GraphicsNDArray" = qattr((..., [1, 1, 0, 2]))
yyxw: "GraphicsNDArray" = qattr((..., [1, 1, 0, 3]))
yyyx: "GraphicsNDArray" = qattr((..., [1, 1, 1, 0]))
yyyy: "GraphicsNDArray" = qattr((..., [1, 1, 1, 1]))
yyyz: "GraphicsNDArray" = qattr((..., [1, 1, 1, 2]))
yyyw: "GraphicsNDArray" = qattr((..., [1, 1, 1, 3]))
yyzx: "GraphicsNDArray" = qattr((..., [1, 1, 2, 0]))
yyzy: "GraphicsNDArray" = qattr((..., [1, 1, 2, 1]))
yyzz: "GraphicsNDArray" = qattr((..., [1, 1, 2, 2]))
yyzw: "GraphicsNDArray" = qattr((..., [1, 1, 2, 3]))
yywx: "GraphicsNDArray" = qattr((..., [1, 1, 3, 0]))
yywy: "GraphicsNDArray" = qattr((..., [1, 1, 3, 1]))
yywz: "GraphicsNDArray" = qattr((..., [1, 1, 3, 2]))
yyww: "GraphicsNDArray" = qattr((..., [1, 1, 3, 3]))
yzxx: "GraphicsNDArray" = qattr((..., [1, 2, 0, 0]))
yzxy: "GraphicsNDArray" = qattr((..., [1, 2, 0, 1]))
yzxz: "GraphicsNDArray" = qattr((..., [1, 2, 0, 2]))
yzxw: "GraphicsNDArray" = qattr((..., [1, 2, 0, 3]))
yzyx: "GraphicsNDArray" = qattr((..., [1, 2, 1, 0]))
yzyy: "GraphicsNDArray" = qattr((..., [1, 2, 1, 1]))
yzyz: "GraphicsNDArray" = qattr((..., [1, 2, 1, 2]))
yzyw: "GraphicsNDArray" = qattr((..., [1, 2, 1, 3]))
yzzx: "GraphicsNDArray" = qattr((..., [1, 2, 2, 0]))
yzzy: "GraphicsNDArray" = qattr((..., [1, 2, 2, 1]))
yzzz: "GraphicsNDArray" = qattr((..., [1, 2, 2, 2]))
yzzw: "GraphicsNDArray" = qattr((..., [1, 2, 2, 3]))
yzwx: "GraphicsNDArray" = qattr((..., [1, 2, 3, 0]))
yzwy: "GraphicsNDArray" = qattr((..., [1, 2, 3, 1]))
yzwz: "GraphicsNDArray" = qattr((..., [1, 2, 3, 2]))
yzww: "GraphicsNDArray" = qattr((..., [1, 2, 3, 3]))
ywxx: "GraphicsNDArray" = qattr((..., [1, 3, 0, 0]))
ywxy: "GraphicsNDArray" = qattr((..., [1, 3, 0, 1]))
ywxz: "GraphicsNDArray" = qattr((..., [1, 3, 0, 2]))
ywxw: "GraphicsNDArray" = qattr((..., [1, 3, 0, 3]))
ywyx: "GraphicsNDArray" = qattr((..., [1, 3, 1, 0]))
ywyy: "GraphicsNDArray" = qattr((..., [1, 3, 1, 1]))
ywyz: "GraphicsNDArray" = qattr((..., [1, 3, 1, 2]))
ywyw: "GraphicsNDArray" = qattr((..., [1, 3, 1, 3]))
ywzx: "GraphicsNDArray" = qattr((..., [1, 3, 2, 0]))
ywzy: "GraphicsNDArray" = qattr((..., [1, 3, 2, 1]))
ywzz: "GraphicsNDArray" = qattr((..., [1, 3, 2, 2]))
ywzw: "GraphicsNDArray" = qattr((..., [1, 3, 2, 3]))
ywwx: "GraphicsNDArray" = qattr((..., [1, 3, 3, 0]))
ywwy: "GraphicsNDArray" = qattr((..., [1, 3, 3, 1]))
ywwz: "GraphicsNDArray" = qattr((..., [1, 3, 3, 2]))
ywww: "GraphicsNDArray" = qattr((..., [1, 3, 3, 3]))
zxxx: "GraphicsNDArray" = qattr((..., [2, 0, 0, 0]))
zxxy: "GraphicsNDArray" = qattr((..., [2, 0, 0, 1]))
zxxz: "GraphicsNDArray" = qattr((..., [2, 0, 0, 2]))
zxxw: "GraphicsNDArray" = qattr((..., [2, 0, 0, 3]))
zxyx: "GraphicsNDArray" = qattr((..., [2, 0, 1, 0]))
zxyy: "GraphicsNDArray" = qattr((..., [2, 0, 1, 1]))
zxyz: "GraphicsNDArray" = qattr((..., [2, 0, 1, 2]))
zxyw: "GraphicsNDArray" = qattr((..., [2, 0, 1, 3]))
zxzx: "GraphicsNDArray" = qattr((..., [2, 0, 2, 0]))
zxzy: "GraphicsNDArray" = qattr((..., [2, 0, 2, 1]))
zxzz: "GraphicsNDArray" = qattr((..., [2, 0, 2, 2]))
zxzw: "GraphicsNDArray" = qattr((..., [2, 0, 2, 3]))
zxwx: "GraphicsNDArray" = qattr((..., [2, 0, 3, 0]))
zxwy: "GraphicsNDArray" = qattr((..., [2, 0, 3, 1]))
zxwz: "GraphicsNDArray" = qattr((..., [2, 0, 3, 2]))
zxww: "GraphicsNDArray" = qattr((..., [2, 0, 3, 3]))
zyxx: "GraphicsNDArray" = qattr((..., [2, 1, 0, 0]))
zyxy: "GraphicsNDArray" = qattr((..., [2, 1, 0, 1]))
zyxz: "GraphicsNDArray" = qattr((..., [2, 1, 0, 2]))
zyxw: "GraphicsNDArray" = qattr((..., [2, 1, 0, 3]))
zyyx: "GraphicsNDArray" = qattr((..., [2, 1, 1, 0]))
zyyy: "GraphicsNDArray" = qattr((..., [2, 1, 1, 1]))
zyyz: "GraphicsNDArray" = qattr((..., [2, 1, 1, 2]))
zyyw: "GraphicsNDArray" = qattr((..., [2, 1, 1, 3]))
zyzx: "GraphicsNDArray" = qattr((..., [2, 1, 2, 0]))
zyzy: "GraphicsNDArray" = qattr((..., [2, 1, 2, 1]))
zyzz: "GraphicsNDArray" = qattr((..., [2, 1, 2, 2]))
zyzw: "GraphicsNDArray" = qattr((..., [2, 1, 2, 3]))
zywx: "GraphicsNDArray" = qattr((..., [2, 1, 3, 0]))
zywy: "GraphicsNDArray" = qattr((..., [2, 1, 3, 1]))
zywz: "GraphicsNDArray" = qattr((..., [2, 1, 3, 2]))
zyww: "GraphicsNDArray" = qattr((..., [2, 1, 3, 3]))
zzxx: "GraphicsNDArray" = qattr((..., [2, 2, 0, 0]))
zzxy: "GraphicsNDArray" = qattr((..., [2, 2, 0, 1]))
zzxz: "GraphicsNDArray" = qattr((..., [2, 2, 0, 2]))
zzxw: "GraphicsNDArray" = qattr((..., [2, 2, 0, 3]))
zzyx: "GraphicsNDArray" = qattr((..., [2, 2, 1, 0]))
zzyy: "GraphicsNDArray" = qattr((..., [2, 2, 1, 1]))
zzyz: "GraphicsNDArray" = qattr((..., [2, 2, 1, 2]))
zzyw: "GraphicsNDArray" = qattr((..., [2, 2, 1, 3]))
zzzx: "GraphicsNDArray" = qattr((..., [2, 2, 2, 0]))
zzzy: "GraphicsNDArray" = qattr((..., [2, 2, 2, 1]))
zzzz: "GraphicsNDArray" = qattr((..., [2, 2, 2, 2]))
zzzw: "GraphicsNDArray" = qattr((..., [2, 2, 2, 3]))
zzwx: "GraphicsNDArray" = qattr((..., [2, 2, 3, 0]))
zzwy: "GraphicsNDArray" = qattr((..., [2, 2, 3, 1]))
zzwz: "GraphicsNDArray" = qattr((..., [2, 2, 3, 2]))
zzww: "GraphicsNDArray" = qattr((..., [2, 2, 3, 3]))
zwxx: "GraphicsNDArray" = qattr((..., [2, 3, 0, 0]))
zwxy: "GraphicsNDArray" = qattr((..., [2, 3, 0, 1]))
zwxz: "GraphicsNDArray" = qattr((..., [2, 3, 0, 2]))
zwxw: "GraphicsNDArray" = qattr((..., [2, 3, 0, 3]))
zwyx: "GraphicsNDArray" = qattr((..., [2, 3, 1, 0]))
zwyy: "GraphicsNDArray" = qattr((..., [2, 3, 1, 1]))
zwyz: "GraphicsNDArray" = qattr((..., [2, 3, 1, 2]))
zwyw: "GraphicsNDArray" = qattr((..., [2, 3, 1, 3]))
zwzx: "GraphicsNDArray" = qattr((..., [2, 3, 2, 0]))
zwzy: "GraphicsNDArray" = qattr((..., [2, 3, 2, 1]))
zwzz: "GraphicsNDArray" = qattr((..., [2, 3, 2, 2]))
zwzw: "GraphicsNDArray" = qattr((..., [2, 3, 2, 3]))
zwwx: "GraphicsNDArray" = qattr((..., [2, 3, 3, 0]))
zwwy: "GraphicsNDArray" = qattr((..., [2, 3, 3, 1]))
zwwz: "GraphicsNDArray" = qattr((..., [2, 3, 3, 2]))
zwww: "GraphicsNDArray" = qattr((..., [2, 3, 3, 3]))
wxxx: "GraphicsNDArray" = qattr((..., [3, 0, 0, 0]))
wxxy: "GraphicsNDArray" = qattr((..., [3, 0, 0, 1]))
wxxz: "GraphicsNDArray" = qattr((..., [3, 0, 0, 2]))
wxxw: "GraphicsNDArray" = qattr((..., [3, 0, 0, 3]))
wxyx: "GraphicsNDArray" = qattr((..., [3, 0, 1, 0]))
wxyy: "GraphicsNDArray" = qattr((..., [3, 0, 1, 1]))
wxyz: "GraphicsNDArray" = qattr((..., [3, 0, 1, 2]))
wxyw: "GraphicsNDArray" = qattr((..., [3, 0, 1, 3]))
wxzx: "GraphicsNDArray" = qattr((..., [3, 0, 2, 0]))
wxzy: "GraphicsNDArray" = qattr((..., [3, 0, 2, 1]))
wxzz: "GraphicsNDArray" = qattr((..., [3, 0, 2, 2]))
wxzw: "GraphicsNDArray" = qattr((..., [3, 0, 2, 3]))
wxwx: "GraphicsNDArray" = qattr((..., [3, 0, 3, 0]))
wxwy: "GraphicsNDArray" = qattr((..., [3, 0, 3, 1]))
wxwz: "GraphicsNDArray" = qattr((..., [3, 0, 3, 2]))
wxww: "GraphicsNDArray" = qattr((..., [3, 0, 3, 3]))
wyxx: "GraphicsNDArray" = qattr((..., [3, 1, 0, 0]))
wyxy: "GraphicsNDArray" = qattr((..., [3, 1, 0, 1]))
wyxz: "GraphicsNDArray" = qattr((..., [3, 1, 0, 2]))
wyxw: "GraphicsNDArray" = qattr((..., [3, 1, 0, 3]))
wyyx: "GraphicsNDArray" = qattr((..., [3, 1, 1, 0]))
wyyy: "GraphicsNDArray" = qattr((..., [3, 1, 1, 1]))
wyyz: "GraphicsNDArray" = qattr((..., [3, 1, 1, 2]))
wyyw: "GraphicsNDArray" = qattr((..., [3, 1, 1, 3]))
wyzx: "GraphicsNDArray" = qattr((..., [3, 1, 2, 0]))
wyzy: "GraphicsNDArray" = qattr((..., [3, 1, 2, 1]))
wyzz: "GraphicsNDArray" = qattr((..., [3, 1, 2, 2]))
wyzw: "GraphicsNDArray" = qattr((..., [3, 1, 2, 3]))
wywx: "GraphicsNDArray" = qattr((..., [3, 1, 3, 0]))
wywy: "GraphicsNDArray" = qattr((..., [3, 1, 3, 1]))
wywz: "GraphicsNDArray" = qattr((..., [3, 1, 3, 2]))
wyww: "GraphicsNDArray" = qattr((..., [3, 1, 3, 3]))
wzxx: "GraphicsNDArray" = qattr((..., [3, 2, 0, 0]))
wzxy: "GraphicsNDArray" = qattr((..., [3, 2, 0, 1]))
wzxz: "GraphicsNDArray" = qattr((..., [3, 2, 0, 2]))
wzxw: "GraphicsNDArray" = qattr((..., [3, 2, 0, 3]))
wzyx: "GraphicsNDArray" = qattr((..., [3, 2, 1, 0]))
wzyy: "GraphicsNDArray" = qattr((..., [3, 2, 1, 1]))
wzyz: "GraphicsNDArray" = qattr((..., [3, 2, 1, 2]))
wzyw: "GraphicsNDArray" = qattr((..., [3, 2, 1, 3]))
wzzx: "GraphicsNDArray" = qattr((..., [3, 2, 2, 0]))
wzzy: "GraphicsNDArray" = qattr((..., [3, 2, 2, 1]))
wzzz: "GraphicsNDArray" = qattr((..., [3, 2, 2, 2]))
wzzw: "GraphicsNDArray" = qattr((..., [3, 2, 2, 3]))
wzwx: "GraphicsNDArray" = qattr((..., [3, 2, 3, 0]))
wzwy: "GraphicsNDArray" = qattr((..., [3, 2, 3, 1]))
wzwz: "GraphicsNDArray" = qattr((..., [3, 2, 3, 2]))
wzww: "GraphicsNDArray" = qattr((..., [3, 2, 3, 3]))
wwxx: "GraphicsNDArray" = qattr((..., [3, 3, 0, 0]))
wwxy: "GraphicsNDArray" = qattr((..., [3, 3, 0, 1]))
wwxz: "GraphicsNDArray" = qattr((..., [3, 3, 0, 2]))
wwxw: "GraphicsNDArray" = qattr((..., [3, 3, 0, 3]))
wwyx: "GraphicsNDArray" = qattr((..., [3, 3, 1, 0]))
wwyy: "GraphicsNDArray" = qattr((..., [3, 3, 1, 1]))
wwyz: "GraphicsNDArray" = qattr((..., [3, 3, 1, 2]))
wwyw: "GraphicsNDArray" = qattr((..., [3, 3, 1, 3]))
wwzx: "GraphicsNDArray" = qattr((..., [3, 3, 2, 0]))
wwzy: "GraphicsNDArray" = qattr((..., [3, 3, 2, 1]))
wwzz: "GraphicsNDArray" = qattr((..., [3, 3, 2, 2]))
wwzw: "GraphicsNDArray" = qattr((..., [3, 3, 2, 3]))
wwwx: "GraphicsNDArray" = qattr((..., [3, 3, 3, 0]))
wwwy: "GraphicsNDArray" = qattr((..., [3, 3, 3, 1]))
wwwz: "GraphicsNDArray" = qattr((..., [3, 3, 3, 2]))
wwww: "GraphicsNDArray" = qattr((..., [3, 3, 3, 3]))
r: "GraphicsNDArray" = qattr((..., [0]))
g: "GraphicsNDArray" = qattr((..., [1]))
b: "GraphicsNDArray" = qattr((..., [2]))
a: "GraphicsNDArray" = qattr((..., [3]))
rr: "GraphicsNDArray" = qattr((..., [0, 0]))
rg: "GraphicsNDArray" = qattr((..., [0, 1]))
rb: "GraphicsNDArray" = qattr((..., [0, 2]))
ra: "GraphicsNDArray" = qattr((..., [0, 3]))
gr: "GraphicsNDArray" = qattr((..., [1, 0]))
gg: "GraphicsNDArray" = qattr((..., [1, 1]))
gb: "GraphicsNDArray" = qattr((..., [1, 2]))
ga: "GraphicsNDArray" = qattr((..., [1, 3]))
br: "GraphicsNDArray" = qattr((..., [2, 0]))
bg: "GraphicsNDArray" = qattr((..., [2, 1]))
bb: "GraphicsNDArray" = qattr((..., [2, 2]))
ba: "GraphicsNDArray" = qattr((..., [2, 3]))
ar: "GraphicsNDArray" = qattr((..., [3, 0]))
ag: "GraphicsNDArray" = qattr((..., [3, 1]))
ab: "GraphicsNDArray" = qattr((..., [3, 2]))
aa: "GraphicsNDArray" = qattr((..., [3, 3]))
rrr: "GraphicsNDArray" = qattr((..., [0, 0, 0]))
rrg: "GraphicsNDArray" = qattr((..., [0, 0, 1]))
rrb: "GraphicsNDArray" = qattr((..., [0, 0, 2]))
rra: "GraphicsNDArray" = qattr((..., [0, 0, 3]))
rgr: "GraphicsNDArray" = qattr((..., [0, 1, 0]))
rgg: "GraphicsNDArray" = qattr((..., [0, 1, 1]))
rgb: "GraphicsNDArray" = qattr((..., [0, 1, 2]))
rga: "GraphicsNDArray" = qattr((..., [0, 1, 3]))
rbr: "GraphicsNDArray" = qattr((..., [0, 2, 0]))
rbg: "GraphicsNDArray" = qattr((..., [0, 2, 1]))
rbb: "GraphicsNDArray" = qattr((..., [0, 2, 2]))
rba: "GraphicsNDArray" = qattr((..., [0, 2, 3]))
rar: "GraphicsNDArray" = qattr((..., [0, 3, 0]))
rag: "GraphicsNDArray" = qattr((..., [0, 3, 1]))
rab: "GraphicsNDArray" = qattr((..., [0, 3, 2]))
raa: "GraphicsNDArray" = qattr((..., [0, 3, 3]))
grr: "GraphicsNDArray" = qattr((..., [1, 0, 0]))
grg: "GraphicsNDArray" = qattr((..., [1, 0, 1]))
grb: "GraphicsNDArray" = qattr((..., [1, 0, 2]))
gra: "GraphicsNDArray" = qattr((..., [1, 0, 3]))
ggr: "GraphicsNDArray" = qattr((..., [1, 1, 0]))
ggg: "GraphicsNDArray" = qattr((..., [1, 1, 1]))
ggb: "GraphicsNDArray" = qattr((..., [1, 1, 2]))
gga: "GraphicsNDArray" = qattr((..., [1, 1, 3]))
gbr: "GraphicsNDArray" = qattr((..., [1, 2, 0]))
gbg: "GraphicsNDArray" = qattr((..., [1, 2, 1]))
gbb: "GraphicsNDArray" = qattr((..., [1, 2, 2]))
gba: "GraphicsNDArray" = qattr((..., [1, 2, 3]))
gar: "GraphicsNDArray" = qattr((..., [1, 3, 0]))
gag: "GraphicsNDArray" = qattr((..., [1, 3, 1]))
gab: "GraphicsNDArray" = qattr((..., [1, 3, 2]))
gaa: "GraphicsNDArray" = qattr((..., [1, 3, 3]))
brr: "GraphicsNDArray" = qattr((..., [2, 0, 0]))
brg: "GraphicsNDArray" = qattr((..., [2, 0, 1]))
brb: "GraphicsNDArray" = qattr((..., [2, 0, 2]))
bra: "GraphicsNDArray" = qattr((..., [2, 0, 3]))
bgr: "GraphicsNDArray" = qattr((..., [2, 1, 0]))
bgg: "GraphicsNDArray" = qattr((..., [2, 1, 1]))
bgb: "GraphicsNDArray" = qattr((..., [2, 1, 2]))
bga: "GraphicsNDArray" = qattr((..., [2, 1, 3]))
bbr: "GraphicsNDArray" = qattr((..., [2, 2, 0]))
bbg: "GraphicsNDArray" = qattr((..., [2, 2, 1]))
bbb: "GraphicsNDArray" = qattr((..., [2, 2, 2]))
bba: "GraphicsNDArray" = qattr((..., [2, 2, 3]))
bar: "GraphicsNDArray" = qattr((..., [2, 3, 0]))
bag: "GraphicsNDArray" = qattr((..., [2, 3, 1]))
bab: "GraphicsNDArray" = qattr((..., [2, 3, 2]))
baa: "GraphicsNDArray" = qattr((..., [2, 3, 3]))
arr: "GraphicsNDArray" = qattr((..., [3, 0, 0]))
arg: "GraphicsNDArray" = qattr((..., [3, 0, 1]))
arb: "GraphicsNDArray" = qattr((..., [3, 0, 2]))
ara: "GraphicsNDArray" = qattr((..., [3, 0, 3]))
agr: "GraphicsNDArray" = qattr((..., [3, 1, 0]))
agg: "GraphicsNDArray" = qattr((..., [3, 1, 1]))
agb: "GraphicsNDArray" = qattr((..., [3, 1, 2]))
aga: "GraphicsNDArray" = qattr((..., [3, 1, 3]))
abr: "GraphicsNDArray" = qattr((..., [3, 2, 0]))
abg: "GraphicsNDArray" = qattr((..., [3, 2, 1]))
abb: "GraphicsNDArray" = qattr((..., [3, 2, 2]))
aba: "GraphicsNDArray" = qattr((..., [3, 2, 3]))
aar: "GraphicsNDArray" = qattr((..., [3, 3, 0]))
aag: "GraphicsNDArray" = qattr((..., [3, 3, 1]))
aab: "GraphicsNDArray" = qattr((..., [3, 3, 2]))
aaa: "GraphicsNDArray" = qattr((..., [3, 3, 3]))
rrrr: "GraphicsNDArray" = qattr((..., [0, 0, 0, 0]))
rrrg: "GraphicsNDArray" = qattr((..., [0, 0, 0, 1]))
rrrb: "GraphicsNDArray" = qattr((..., [0, 0, 0, 2]))
rrra: "GraphicsNDArray" = qattr((..., [0, 0, 0, 3]))
rrgr: "GraphicsNDArray" = qattr((..., [0, 0, 1, 0]))
rrgg: "GraphicsNDArray" = qattr((..., [0, 0, 1, 1]))
rrgb: "GraphicsNDArray" = qattr((..., [0, 0, 1, 2]))
rrga: "GraphicsNDArray" = qattr((..., [0, 0, 1, 3]))
rrbr: "GraphicsNDArray" = qattr((..., [0, 0, 2, 0]))
rrbg: "GraphicsNDArray" = qattr((..., [0, 0, 2, 1]))
rrbb: "GraphicsNDArray" = qattr((..., [0, 0, 2, 2]))
rrba: "GraphicsNDArray" = qattr((..., [0, 0, 2, 3]))
rrar: "GraphicsNDArray" = qattr((..., [0, 0, 3, 0]))
rrag: "GraphicsNDArray" = qattr((..., [0, 0, 3, 1]))
rrab: "GraphicsNDArray" = qattr((..., [0, 0, 3, 2]))
rraa: "GraphicsNDArray" = qattr((..., [0, 0, 3, 3]))
rgrr: "GraphicsNDArray" = qattr((..., [0, 1, 0, 0]))
rgrg: "GraphicsNDArray" = qattr((..., [0, 1, 0, 1]))
rgrb: "GraphicsNDArray" = qattr((..., [0, 1, 0, 2]))
rgra: "GraphicsNDArray" = qattr((..., [0, 1, 0, 3]))
rggr: "GraphicsNDArray" = qattr((..., [0, 1, 1, 0]))
rggg: "GraphicsNDArray" = qattr((..., [0, 1, 1, 1]))
rggb: "GraphicsNDArray" = qattr((..., [0, 1, 1, 2]))
rgga: "GraphicsNDArray" = qattr((..., [0, 1, 1, 3]))
rgbr: "GraphicsNDArray" = qattr((..., [0, 1, 2, 0]))
rgbg: "GraphicsNDArray" = qattr((..., [0, 1, 2, 1]))
rgbb: "GraphicsNDArray" = qattr((..., [0, 1, 2, 2]))
rgba: "GraphicsNDArray" = qattr((..., [0, 1, 2, 3]))
rgar: "GraphicsNDArray" = qattr((..., [0, 1, 3, 0]))
rgag: "GraphicsNDArray" = qattr((..., [0, 1, 3, 1]))
rgab: "GraphicsNDArray" = qattr((..., [0, 1, 3, 2]))
rgaa: "GraphicsNDArray" = qattr((..., [0, 1, 3, 3]))
rbrr: "GraphicsNDArray" = qattr((..., [0, 2, 0, 0]))
rbrg: "GraphicsNDArray" = qattr((..., [0, 2, 0, 1]))
rbrb: "GraphicsNDArray" = qattr((..., [0, 2, 0, 2]))
rbra: "GraphicsNDArray" = qattr((..., [0, 2, 0, 3]))
rbgr: "GraphicsNDArray" = qattr((..., [0, 2, 1, 0]))
rbgg: "GraphicsNDArray" = qattr((..., [0, 2, 1, 1]))
rbgb: "GraphicsNDArray" = qattr((..., [0, 2, 1, 2]))
rbga: "GraphicsNDArray" = qattr((..., [0, 2, 1, 3]))
rbbr: "GraphicsNDArray" = qattr((..., [0, 2, 2, 0]))
rbbg: "GraphicsNDArray" = qattr((..., [0, 2, 2, 1]))
rbbb: "GraphicsNDArray" = qattr((..., [0, 2, 2, 2]))
rbba: "GraphicsNDArray" = qattr((..., [0, 2, 2, 3]))
rbar: "GraphicsNDArray" = qattr((..., [0, 2, 3, 0]))
rbag: "GraphicsNDArray" = qattr((..., [0, 2, 3, 1]))
rbab: "GraphicsNDArray" = qattr((..., [0, 2, 3, 2]))
rbaa: "GraphicsNDArray" = qattr((..., [0, 2, 3, 3]))
rarr: "GraphicsNDArray" = qattr((..., [0, 3, 0, 0]))
rarg: "GraphicsNDArray" = qattr((..., [0, 3, 0, 1]))
rarb: "GraphicsNDArray" = qattr((..., [0, 3, 0, 2]))
rara: "GraphicsNDArray" = qattr((..., [0, 3, 0, 3]))
ragr: "GraphicsNDArray" = qattr((..., [0, 3, 1, 0]))
ragg: "GraphicsNDArray" = qattr((..., [0, 3, 1, 1]))
ragb: "GraphicsNDArray" = qattr((..., [0, 3, 1, 2]))
raga: "GraphicsNDArray" = qattr((..., [0, 3, 1, 3]))
rabr: "GraphicsNDArray" = qattr((..., [0, 3, 2, 0]))
rabg: "GraphicsNDArray" = qattr((..., [0, 3, 2, 1]))
rabb: "GraphicsNDArray" = qattr((..., [0, 3, 2, 2]))
raba: "GraphicsNDArray" = qattr((..., [0, 3, 2, 3]))
raar: "GraphicsNDArray" = qattr((..., [0, 3, 3, 0]))
raag: "GraphicsNDArray" = qattr((..., [0, 3, 3, 1]))
raab: "GraphicsNDArray" = qattr((..., [0, 3, 3, 2]))
raaa: "GraphicsNDArray" = qattr((..., [0, 3, 3, 3]))
grrr: "GraphicsNDArray" = qattr((..., [1, 0, 0, 0]))
grrg: "GraphicsNDArray" = qattr((..., [1, 0, 0, 1]))
grrb: "GraphicsNDArray" = qattr((..., [1, 0, 0, 2]))
grra: "GraphicsNDArray" = qattr((..., [1, 0, 0, 3]))
grgr: "GraphicsNDArray" = qattr((..., [1, 0, 1, 0]))
grgg: "GraphicsNDArray" = qattr((..., [1, 0, 1, 1]))
grgb: "GraphicsNDArray" = qattr((..., [1, 0, 1, 2]))
grga: "GraphicsNDArray" = qattr((..., [1, 0, 1, 3]))
grbr: "GraphicsNDArray" = qattr((..., [1, 0, 2, 0]))
grbg: "GraphicsNDArray" = qattr((..., [1, 0, 2, 1]))
grbb: "GraphicsNDArray" = qattr((..., [1, 0, 2, 2]))
grba: "GraphicsNDArray" = qattr((..., [1, 0, 2, 3]))
grar: "GraphicsNDArray" = qattr((..., [1, 0, 3, 0]))
grag: "GraphicsNDArray" = qattr((..., [1, 0, 3, 1]))
grab: "GraphicsNDArray" = qattr((..., [1, 0, 3, 2]))
graa: "GraphicsNDArray" = qattr((..., [1, 0, 3, 3]))
ggrr: "GraphicsNDArray" = qattr((..., [1, 1, 0, 0]))
ggrg: "GraphicsNDArray" = qattr((..., [1, 1, 0, 1]))
ggrb: "GraphicsNDArray" = qattr((..., [1, 1, 0, 2]))
ggra: "GraphicsNDArray" = qattr((..., [1, 1, 0, 3]))
gggr: "GraphicsNDArray" = qattr((..., [1, 1, 1, 0]))
gggg: "GraphicsNDArray" = qattr((..., [1, 1, 1, 1]))
gggb: "GraphicsNDArray" = qattr((..., [1, 1, 1, 2]))
ggga: "GraphicsNDArray" = qattr((..., [1, 1, 1, 3]))
ggbr: "GraphicsNDArray" = qattr((..., [1, 1, 2, 0]))
ggbg: "GraphicsNDArray" = qattr((..., [1, 1, 2, 1]))
ggbb: "GraphicsNDArray" = qattr((..., [1, 1, 2, 2]))
ggba: "GraphicsNDArray" = qattr((..., [1, 1, 2, 3]))
ggar: "GraphicsNDArray" = qattr((..., [1, 1, 3, 0]))
ggag: "GraphicsNDArray" = qattr((..., [1, 1, 3, 1]))
ggab: "GraphicsNDArray" = qattr((..., [1, 1, 3, 2]))
ggaa: "GraphicsNDArray" = qattr((..., [1, 1, 3, 3]))
gbrr: "GraphicsNDArray" = qattr((..., [1, 2, 0, 0]))
gbrg: "GraphicsNDArray" = qattr((..., [1, 2, 0, 1]))
gbrb: "GraphicsNDArray" = qattr((..., [1, 2, 0, 2]))
gbra: "GraphicsNDArray" = qattr((..., [1, 2, 0, 3]))
gbgr: "GraphicsNDArray" = qattr((..., [1, 2, 1, 0]))
gbgg: "GraphicsNDArray" = qattr((..., [1, 2, 1, 1]))
gbgb: "GraphicsNDArray" = qattr((..., [1, 2, 1, 2]))
gbga: "GraphicsNDArray" = qattr((..., [1, 2, 1, 3]))
gbbr: "GraphicsNDArray" = qattr((..., [1, 2, 2, 0]))
gbbg: "GraphicsNDArray" = qattr((..., [1, 2, 2, 1]))
gbbb: "GraphicsNDArray" = qattr((..., [1, 2, 2, 2]))
gbba: "GraphicsNDArray" = qattr((..., [1, 2, 2, 3]))
gbar: "GraphicsNDArray" = qattr((..., [1, 2, 3, 0]))
gbag: "GraphicsNDArray" = qattr((..., [1, 2, 3, 1]))
gbab: "GraphicsNDArray" = qattr((..., [1, 2, 3, 2]))
gbaa: "GraphicsNDArray" = qattr((..., [1, 2, 3, 3]))
garr: "GraphicsNDArray" = qattr((..., [1, 3, 0, 0]))
garg: "GraphicsNDArray" = qattr((..., [1, 3, 0, 1]))
garb: "GraphicsNDArray" = qattr((..., [1, 3, 0, 2]))
gara: "GraphicsNDArray" = qattr((..., [1, 3, 0, 3]))
gagr: "GraphicsNDArray" = qattr((..., [1, 3, 1, 0]))
gagg: "GraphicsNDArray" = qattr((..., [1, 3, 1, 1]))
gagb: "GraphicsNDArray" = qattr((..., [1, 3, 1, 2]))
gaga: "GraphicsNDArray" = qattr((..., [1, 3, 1, 3]))
gabr: "GraphicsNDArray" = qattr((..., [1, 3, 2, 0]))
gabg: "GraphicsNDArray" = qattr((..., [1, 3, 2, 1]))
gabb: "GraphicsNDArray" = qattr((..., [1, 3, 2, 2]))
gaba: "GraphicsNDArray" = qattr((..., [1, 3, 2, 3]))
gaar: "GraphicsNDArray" = qattr((..., [1, 3, 3, 0]))
gaag: "GraphicsNDArray" = qattr((..., [1, 3, 3, 1]))
gaab: "GraphicsNDArray" = qattr((..., [1, 3, 3, 2]))
gaaa: "GraphicsNDArray" = qattr((..., [1, 3, 3, 3]))
brrr: "GraphicsNDArray" = qattr((..., [2, 0, 0, 0]))
brrg: "GraphicsNDArray" = qattr((..., [2, 0, 0, 1]))
brrb: "GraphicsNDArray" = qattr((..., [2, 0, 0, 2]))
brra: "GraphicsNDArray" = qattr((..., [2, 0, 0, 3]))
brgr: "GraphicsNDArray" = qattr((..., [2, 0, 1, 0]))
brgg: "GraphicsNDArray" = qattr((..., [2, 0, 1, 1]))
brgb: "GraphicsNDArray" = qattr((..., [2, 0, 1, 2]))
brga: "GraphicsNDArray" = qattr((..., [2, 0, 1, 3]))
brbr: "GraphicsNDArray" = qattr((..., [2, 0, 2, 0]))
brbg: "GraphicsNDArray" = qattr((..., [2, 0, 2, 1]))
brbb: "GraphicsNDArray" = qattr((..., [2, 0, 2, 2]))
brba: "GraphicsNDArray" = qattr((..., [2, 0, 2, 3]))
brar: "GraphicsNDArray" = qattr((..., [2, 0, 3, 0]))
brag: "GraphicsNDArray" = qattr((..., [2, 0, 3, 1]))
brab: "GraphicsNDArray" = qattr((..., [2, 0, 3, 2]))
braa: "GraphicsNDArray" = qattr((..., [2, 0, 3, 3]))
bgrr: "GraphicsNDArray" = qattr((..., [2, 1, 0, 0]))
bgrg: "GraphicsNDArray" = qattr((..., [2, 1, 0, 1]))
bgrb: "GraphicsNDArray" = qattr((..., [2, 1, 0, 2]))
bgra: "GraphicsNDArray" = qattr((..., [2, 1, 0, 3]))
bggr: "GraphicsNDArray" = qattr((..., [2, 1, 1, 0]))
bggg: "GraphicsNDArray" = qattr((..., [2, 1, 1, 1]))
bggb: "GraphicsNDArray" = qattr((..., [2, 1, 1, 2]))
bgga: "GraphicsNDArray" = qattr((..., [2, 1, 1, 3]))
bgbr: "GraphicsNDArray" = qattr((..., [2, 1, 2, 0]))
bgbg: "GraphicsNDArray" = qattr((..., [2, 1, 2, 1]))
bgbb: "GraphicsNDArray" = qattr((..., [2, 1, 2, 2]))
bgba: "GraphicsNDArray" = qattr((..., [2, 1, 2, 3]))
bgar: "GraphicsNDArray" = qattr((..., [2, 1, 3, 0]))
bgag: "GraphicsNDArray" = qattr((..., [2, 1, 3, 1]))
bgab: "GraphicsNDArray" = qattr((..., [2, 1, 3, 2]))
bgaa: "GraphicsNDArray" = qattr((..., [2, 1, 3, 3]))
bbrr: "GraphicsNDArray" = qattr((..., [2, 2, 0, 0]))
bbrg: "GraphicsNDArray" = qattr((..., [2, 2, 0, 1]))
bbrb: "GraphicsNDArray" = qattr((..., [2, 2, 0, 2]))
bbra: "GraphicsNDArray" = qattr((..., [2, 2, 0, 3]))
bbgr: "GraphicsNDArray" = qattr((..., [2, 2, 1, 0]))
bbgg: "GraphicsNDArray" = qattr((..., [2, 2, 1, 1]))
bbgb: "GraphicsNDArray" = qattr((..., [2, 2, 1, 2]))
bbga: "GraphicsNDArray" = qattr((..., [2, 2, 1, 3]))
bbbr: "GraphicsNDArray" = qattr((..., [2, 2, 2, 0]))
bbbg: "GraphicsNDArray" = qattr((..., [2, 2, 2, 1]))
bbbb: "GraphicsNDArray" = qattr((..., [2, 2, 2, 2]))
bbba: "GraphicsNDArray" = qattr((..., [2, 2, 2, 3]))
bbar: "GraphicsNDArray" = qattr((..., [2, 2, 3, 0]))
bbag: "GraphicsNDArray" = qattr((..., [2, 2, 3, 1]))
bbab: "GraphicsNDArray" = qattr((..., [2, 2, 3, 2]))
bbaa: "GraphicsNDArray" = qattr((..., [2, 2, 3, 3]))
barr: "GraphicsNDArray" = qattr((..., [2, 3, 0, 0]))
barg: "GraphicsNDArray" = qattr((..., [2, 3, 0, 1]))
barb: "GraphicsNDArray" = qattr((..., [2, 3, 0, 2]))
bara: "GraphicsNDArray" = qattr((..., [2, 3, 0, 3]))
bagr: "GraphicsNDArray" = qattr((..., [2, 3, 1, 0]))
bagg: "GraphicsNDArray" = qattr((..., [2, 3, 1, 1]))
bagb: "GraphicsNDArray" = qattr((..., [2, 3, 1, 2]))
baga: "GraphicsNDArray" = qattr((..., [2, 3, 1, 3]))
babr: "GraphicsNDArray" = qattr((..., [2, 3, 2, 0]))
babg: "GraphicsNDArray" = qattr((..., [2, 3, 2, 1]))
babb: "GraphicsNDArray" = qattr((..., [2, 3, 2, 2]))
baba: "GraphicsNDArray" = qattr((..., [2, 3, 2, 3]))
baar: "GraphicsNDArray" = qattr((..., [2, 3, 3, 0]))
baag: "GraphicsNDArray" = qattr((..., [2, 3, 3, 1]))
baab: "GraphicsNDArray" = qattr((..., [2, 3, 3, 2]))
baaa: "GraphicsNDArray" = qattr((..., [2, 3, 3, 3]))
arrr: "GraphicsNDArray" = qattr((..., [3, 0, 0, 0]))
arrg: "GraphicsNDArray" = qattr((..., [3, 0, 0, 1]))
arrb: "GraphicsNDArray" = qattr((..., [3, 0, 0, 2]))
arra: "GraphicsNDArray" = qattr((..., [3, 0, 0, 3]))
argr: "GraphicsNDArray" = qattr((..., [3, 0, 1, 0]))
argg: "GraphicsNDArray" = qattr((..., [3, 0, 1, 1]))
argb: "GraphicsNDArray" = qattr((..., [3, 0, 1, 2]))
arga: "GraphicsNDArray" = qattr((..., [3, 0, 1, 3]))
arbr: "GraphicsNDArray" = qattr((..., [3, 0, 2, 0]))
arbg: "GraphicsNDArray" = qattr((..., [3, 0, 2, 1]))
arbb: "GraphicsNDArray" = qattr((..., [3, 0, 2, 2]))
arba: "GraphicsNDArray" = qattr((..., [3, 0, 2, 3]))
arar: "GraphicsNDArray" = qattr((..., [3, 0, 3, 0]))
arag: "GraphicsNDArray" = qattr((..., [3, 0, 3, 1]))
arab: "GraphicsNDArray" = qattr((..., [3, 0, 3, 2]))
araa: "GraphicsNDArray" = qattr((..., [3, 0, 3, 3]))
agrr: "GraphicsNDArray" = qattr((..., [3, 1, 0, 0]))
agrg: "GraphicsNDArray" = qattr((..., [3, 1, 0, 1]))
agrb: "GraphicsNDArray" = qattr((..., [3, 1, 0, 2]))
agra: "GraphicsNDArray" = qattr((..., [3, 1, 0, 3]))
aggr: "GraphicsNDArray" = qattr((..., [3, 1, 1, 0]))
aggg: "GraphicsNDArray" = qattr((..., [3, 1, 1, 1]))
aggb: "GraphicsNDArray" = qattr((..., [3, 1, 1, 2]))
agga: "GraphicsNDArray" = qattr((..., [3, 1, 1, 3]))
agbr: "GraphicsNDArray" = qattr((..., [3, 1, 2, 0]))
agbg: "GraphicsNDArray" = qattr((..., [3, 1, 2, 1]))
agbb: "GraphicsNDArray" = qattr((..., [3, 1, 2, 2]))
agba: "GraphicsNDArray" = qattr((..., [3, 1, 2, 3]))
agar: "GraphicsNDArray" = qattr((..., [3, 1, 3, 0]))
agag: "GraphicsNDArray" = qattr((..., [3, 1, 3, 1]))
agab: "GraphicsNDArray" = qattr((..., [3, 1, 3, 2]))
agaa: "GraphicsNDArray" = qattr((..., [3, 1, 3, 3]))
abrr: "GraphicsNDArray" = qattr((..., [3, 2, 0, 0]))
abrg: "GraphicsNDArray" = qattr((..., [3, 2, 0, 1]))
abrb: "GraphicsNDArray" = qattr((..., [3, 2, 0, 2]))
abra: "GraphicsNDArray" = qattr((..., [3, 2, 0, 3]))
abgr: "GraphicsNDArray" = qattr((..., [3, 2, 1, 0]))
abgg: "GraphicsNDArray" = qattr((..., [3, 2, 1, 1]))
abgb: "GraphicsNDArray" = qattr((..., [3, 2, 1, 2]))
abga: "GraphicsNDArray" = qattr((..., [3, 2, 1, 3]))
abbr: "GraphicsNDArray" = qattr((..., [3, 2, 2, 0]))
abbg: "GraphicsNDArray" = qattr((..., [3, 2, 2, 1]))
abbb: "GraphicsNDArray" = qattr((..., [3, 2, 2, 2]))
abba: "GraphicsNDArray" = qattr((..., [3, 2, 2, 3]))
abar: "GraphicsNDArray" = qattr((..., [3, 2, 3, 0]))
abag: "GraphicsNDArray" = qattr((..., [3, 2, 3, 1]))
abab: "GraphicsNDArray" = qattr((..., [3, 2, 3, 2]))
abaa: "GraphicsNDArray" = qattr((..., [3, 2, 3, 3]))
aarr: "GraphicsNDArray" = qattr((..., [3, 3, 0, 0]))
aarg: "GraphicsNDArray" = qattr((..., [3, 3, 0, 1]))
aarb: "GraphicsNDArray" = qattr((..., [3, 3, 0, 2]))
aara: "GraphicsNDArray" = qattr((..., [3, 3, 0, 3]))
aagr: "GraphicsNDArray" = qattr((..., [3, 3, 1, 0]))
aagg: "GraphicsNDArray" = qattr((..., [3, 3, 1, 1]))
aagb: "GraphicsNDArray" = qattr((..., [3, 3, 1, 2]))
aaga: "GraphicsNDArray" = qattr((..., [3, 3, 1, 3]))
aabr: "GraphicsNDArray" = qattr((..., [3, 3, 2, 0]))
aabg: "GraphicsNDArray" = qattr((..., [3, 3, 2, 1]))
aabb: "GraphicsNDArray" = qattr((..., [3, 3, 2, 2]))
aaba: "GraphicsNDArray" = qattr((..., [3, 3, 2, 3]))
aaar: "GraphicsNDArray" = qattr((..., [3, 3, 3, 0]))
aaag: "GraphicsNDArray" = qattr((..., [3, 3, 3, 1]))
aaab: "GraphicsNDArray" = qattr((..., [3, 3, 3, 2]))
aaaa: "GraphicsNDArray" = qattr((..., [3, 3, 3, 3]))
NDArray = Union[numpy.ndarray, GraphicsNDArray]
T = TypeVar("T")
| @type_match(numpy.ndarray) | 1 | 2023-11-21 22:44:49+00:00 | 12k |
yk7333/d3po | scripts/train_with_rm.py | [
{
"identifier": "pipeline_with_logprob",
"path": "d3po_pytorch/diffusers_patch/pipeline_with_logprob.py",
"snippet": "@torch.no_grad()\ndef pipeline_with_logprob(\n self: StableDiffusionPipeline,\n prompt: Union[str, List[str]] = None,\n height: Optional[int] = None,\n width: Optional[int] =... | from collections import defaultdict
from concurrent import futures
from absl import app, flags
from ml_collections import config_flags
from accelerate import Accelerator
from accelerate.utils import set_seed, ProjectConfiguration
from accelerate.logging import get_logger
from diffusers import StableDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.loaders import AttnProcsLayers
from diffusers.models.attention_processor import LoRAAttnProcessor
from d3po_pytorch.diffusers_patch.pipeline_with_logprob import pipeline_with_logprob
from d3po_pytorch.diffusers_patch.ddim_with_logprob import ddim_step_with_logprob
from functools import partial
from PIL import Image
import contextlib
import os
import copy
import datetime
import time
import sys
import numpy as np
import d3po_pytorch.prompts
import d3po_pytorch.rewards
import torch
import wandb
import tqdm
import tempfile
import copy
import numpy as np
import bitsandbytes as bnb | 7,614 | accelerator.register_save_state_pre_hook(save_model_hook)
accelerator.register_load_state_pre_hook(load_model_hook)
# Enable TF32 for faster training on Ampere GPUs,
# cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
if config.allow_tf32:
torch.backends.cuda.matmul.allow_tf32 = True
# Initialize the optimizer
if config.train.use_8bit_adam:
try:
except ImportError:
raise ImportError(
"Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`"
)
optimizer_cls = bnb.optim.AdamW8bit
else:
optimizer_cls = torch.optim.AdamW
optimizer = optimizer_cls(
trainable_layers.parameters(),
lr=config.train.learning_rate,
betas=(config.train.adam_beta1, config.train.adam_beta2),
weight_decay=config.train.adam_weight_decay,
eps=config.train.adam_epsilon,
)
# prepare prompt and reward fn
prompt_fn = getattr(d3po_pytorch.prompts, config.prompt_fn)
reward_fn = getattr(d3po_pytorch.rewards, config.reward_fn)()
# generate negative prompt embeddings
neg_prompt_embed = pipeline.text_encoder(
pipeline.tokenizer(
[""],
return_tensors="pt",
padding="max_length",
truncation=True,
max_length=pipeline.tokenizer.model_max_length,
).input_ids.to(accelerator.device)
)[0]
sample_neg_prompt_embeds = neg_prompt_embed.repeat(config.sample.batch_size, 1, 1)
train_neg_prompt_embeds = neg_prompt_embed.repeat(config.train.batch_size, 1, 1)
# for some reason, autocast is necessary for non-lora training but for lora training it isn't necessary and it uses
# more memory
autocast = contextlib.nullcontext if config.use_lora else accelerator.autocast
# Prepare everything with our `accelerator`.
trainable_layers, optimizer = accelerator.prepare(trainable_layers, optimizer)
# executor to perform callbacks asynchronously.
executor = futures.ThreadPoolExecutor(max_workers=2)
# Train!
samples_per_epoch = config.sample.batch_size * accelerator.num_processes * config.sample.num_batches_per_epoch
total_train_batch_size = (
config.train.batch_size * accelerator.num_processes * config.train.gradient_accumulation_steps
)
logger.info("***** Running training *****")
logger.info(f" Num Epochs = {config.num_epochs}")
logger.info(f" Sample batch size per device = {config.sample.batch_size}")
logger.info(f" Train batch size per device = {config.train.batch_size}")
logger.info(f" Gradient Accumulation steps = {config.train.gradient_accumulation_steps}")
logger.info("")
logger.info(f" Total number of samples per epoch = {samples_per_epoch}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}")
logger.info(f" Number of gradient updates per inner epoch = {samples_per_epoch // total_train_batch_size}")
logger.info(f" Number of inner epochs = {config.train.num_inner_epochs}")
assert config.sample.batch_size >= config.train.batch_size
assert config.sample.batch_size % config.train.batch_size == 0
assert samples_per_epoch % total_train_batch_size == 0
if config.resume_from:
logger.info(f"Resuming from {config.resume_from}")
accelerator.load_state(config.resume_from)
first_epoch = int(config.resume_from.split("_")[-1]) + 1
else:
first_epoch = 0
global_step = 0
for epoch in range(first_epoch, config.num_epochs):
#################### SAMPLING ####################
pipeline.unet.eval()
samples = []
prompt_metadata = None
for i in tqdm(
range(config.sample.num_batches_per_epoch),
desc=f"Epoch {epoch}: sampling",
disable=not accelerator.is_local_main_process,
position=0,
):
# generate prompts
prompts1, prompt_metadata = zip(
*[prompt_fn(**config.prompt_fn_kwargs) for _ in range(config.sample.batch_size)]
)
prompts2 = prompts1
# encode prompts
prompt_ids1 = pipeline.tokenizer(
prompts1,
return_tensors="pt",
padding="max_length",
truncation=True,
max_length=pipeline.tokenizer.model_max_length,
).input_ids.to(accelerator.device)
prompt_ids2 = pipeline.tokenizer(
prompts2,
return_tensors="pt",
padding="max_length",
truncation=True,
max_length=pipeline.tokenizer.model_max_length,
).input_ids.to(accelerator.device)
prompt_embeds1 = pipeline.text_encoder(prompt_ids1)[0]
prompt_embeds2 = pipeline.text_encoder(prompt_ids2)[0]
# sample
with autocast():
| script_path = os.path.abspath(__file__)
sys.path.append(os.path.dirname(os.path.dirname(script_path)))
tqdm = partial(tqdm.tqdm, dynamic_ncols=True)
FLAGS = flags.FLAGS
config_flags.DEFINE_config_file("config", "config/base.py", "Training configuration.")
logger = get_logger(__name__)
def main(_):
# basic Accelerate and logging setup
config = FLAGS.config
unique_id = datetime.datetime.now().strftime("%Y.%m.%d_%H.%M.%S")
if not config.run_name:
config.run_name = unique_id
else:
config.run_name += "_" + unique_id
if config.resume_from:
config.resume_from = os.path.normpath(os.path.expanduser(config.resume_from))
if "checkpoint_" not in os.path.basename(config.resume_from):
# get the most recent checkpoint in this directory
checkpoints = list(filter(lambda x: "checkpoint_" in x, os.listdir(config.resume_from)))
if len(checkpoints) == 0:
raise ValueError(f"No checkpoints found in {config.resume_from}")
config.resume_from = os.path.join(
config.resume_from,
sorted(checkpoints, key=lambda x: int(x.split("_")[-1]))[-1],
)
# number of timesteps within each trajectory to train on
num_train_timesteps = int(config.sample.num_steps * config.train.timestep_fraction)
accelerator_config = ProjectConfiguration(
project_dir=os.path.join(config.logdir, config.run_name),
automatic_checkpoint_naming=True,
total_limit=config.num_checkpoint_limit,
)
accelerator = Accelerator(
log_with="wandb",
mixed_precision=config.mixed_precision,
project_config=accelerator_config,
gradient_accumulation_steps=config.train.gradient_accumulation_steps * num_train_timesteps,
)
if accelerator.is_main_process:
accelerator.init_trackers(
project_name="d3po-pytorch", config=config.to_dict(), init_kwargs={"wandb": {"name": config.run_name}}
)
logger.info(f"\n{config}")
ramdom_seed = np.random.randint(0,100000)
set_seed(ramdom_seed, device_specific=True)
# load scheduler, tokenizer and models.
pipeline = StableDiffusionPipeline.from_pretrained(config.pretrained.model, torch_dtype=torch.float16)
if config.use_xformers:
pipeline.enable_xformers_memory_efficient_attention()
# freeze parameters of models to save more memory
pipeline.vae.requires_grad_(False)
pipeline.text_encoder.requires_grad_(False)
pipeline.unet.requires_grad_(not config.use_lora)
if not config.use_lora and config.train.activation_checkpointing:
pipeline.unet.enable_gradient_checkpointing()
# disable safety checker
pipeline.safety_checker = None
# make the progress bar nicer
pipeline.set_progress_bar_config(
position=1,
disable=not accelerator.is_local_main_process,
leave=False,
desc="Timestep",
dynamic_ncols=True,
)
# switch to DDIM scheduler
pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config)
# For mixed precision training we cast all non-trainable weigths (vae, non-lora text_encoder and non-lora unet) to half-precision
# as these weights are only used for inference, keeping weights in full precision is not required.
inference_dtype = torch.float32
if accelerator.mixed_precision == "fp16":
inference_dtype = torch.float16
elif accelerator.mixed_precision == "bf16":
inference_dtype = torch.bfloat16
# Move unet, vae and text_encoder to device and cast to inference_dtype
pipeline.vae.to(accelerator.device, dtype=inference_dtype)
pipeline.text_encoder.to(accelerator.device, dtype=inference_dtype)
pipeline.unet.to(accelerator.device, dtype=inference_dtype)
ref = copy.deepcopy(pipeline.unet)
for param in ref.parameters():
param.requires_grad = False
if config.use_lora:
# Set correct lora layers
lora_attn_procs = {}
for name in pipeline.unet.attn_processors.keys():
cross_attention_dim = (
None if name.endswith("attn1.processor") else pipeline.unet.config.cross_attention_dim
)
if name.startswith("mid_block"):
hidden_size = pipeline.unet.config.block_out_channels[-1]
elif name.startswith("up_blocks"):
block_id = int(name[len("up_blocks.")])
hidden_size = list(reversed(pipeline.unet.config.block_out_channels))[block_id]
elif name.startswith("down_blocks"):
block_id = int(name[len("down_blocks.")])
hidden_size = pipeline.unet.config.block_out_channels[block_id]
lora_attn_procs[name] = LoRAAttnProcessor(hidden_size=hidden_size, cross_attention_dim=cross_attention_dim)
pipeline.unet.set_attn_processor(lora_attn_procs)
trainable_layers = AttnProcsLayers(pipeline.unet.attn_processors)
else:
trainable_layers = pipeline.unet
# set up diffusers-friendly checkpoint saving with Accelerate
def save_model_hook(models, weights, output_dir):
assert len(models) == 1
if config.use_lora and isinstance(models[0], AttnProcsLayers):
pipeline.unet.save_attn_procs(output_dir)
elif not config.use_lora and isinstance(models[0], UNet2DConditionModel):
models[0].save_pretrained(os.path.join(output_dir, "unet"))
else:
raise ValueError(f"Unknown model type {type(models[0])}")
weights.pop() # ensures that accelerate doesn't try to handle saving of the model
def load_model_hook(models, input_dir):
assert len(models) == 1
if config.use_lora and isinstance(models[0], AttnProcsLayers):
tmp_unet = UNet2DConditionModel.from_pretrained(
config.pretrained.model, revision=config.pretrained.revision, subfolder="unet"
)
tmp_unet.load_attn_procs(input_dir)
models[0].load_state_dict(AttnProcsLayers(tmp_unet.attn_processors).state_dict())
del tmp_unet
elif not config.use_lora and isinstance(models[0], UNet2DConditionModel):
load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder="unet")
models[0].register_to_config(**load_model.config)
models[0].load_state_dict(load_model.state_dict())
del load_model
else:
raise ValueError(f"Unknown model type {type(models[0])}")
models.pop() # ensures that accelerate doesn't try to handle loading of the model
# Support multi-dimensional comparison. Default demension is 1. You can add many rewards instead of only one to judge the preference of images.
# For example: A: clipscore-30 blipscore-10 LAION aesthetic score-6.0 ; B: 20, 8, 5.0 then A is prefered than B
# if C: 40, 4, 4.0 since C[0] = 40 > A[0] and C[1] < A[1], we do not think C is prefered than A or A is prefered than C
def compare(a, b):
assert isinstance(a, torch.Tensor) and isinstance(b, torch.Tensor)
if len(a.shape)==1:
a = a[...,None]
b = b[...,None]
a_dominates = torch.logical_and(torch.all(a <= b, dim=1), torch.any(a < b, dim=1))
b_dominates = torch.logical_and(torch.all(b <= a, dim=1), torch.any(b < a, dim=1))
c = torch.zeros([a.shape[0],2],dtype=torch.float,device=a.device)
c[a_dominates] = torch.tensor([-1., 1.],device=a.device)
c[b_dominates] = torch.tensor([1., -1.],device=a.device)
return c
accelerator.register_save_state_pre_hook(save_model_hook)
accelerator.register_load_state_pre_hook(load_model_hook)
# Enable TF32 for faster training on Ampere GPUs,
# cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
if config.allow_tf32:
torch.backends.cuda.matmul.allow_tf32 = True
# Initialize the optimizer
if config.train.use_8bit_adam:
try:
except ImportError:
raise ImportError(
"Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`"
)
optimizer_cls = bnb.optim.AdamW8bit
else:
optimizer_cls = torch.optim.AdamW
optimizer = optimizer_cls(
trainable_layers.parameters(),
lr=config.train.learning_rate,
betas=(config.train.adam_beta1, config.train.adam_beta2),
weight_decay=config.train.adam_weight_decay,
eps=config.train.adam_epsilon,
)
# prepare prompt and reward fn
prompt_fn = getattr(d3po_pytorch.prompts, config.prompt_fn)
reward_fn = getattr(d3po_pytorch.rewards, config.reward_fn)()
# generate negative prompt embeddings
neg_prompt_embed = pipeline.text_encoder(
pipeline.tokenizer(
[""],
return_tensors="pt",
padding="max_length",
truncation=True,
max_length=pipeline.tokenizer.model_max_length,
).input_ids.to(accelerator.device)
)[0]
sample_neg_prompt_embeds = neg_prompt_embed.repeat(config.sample.batch_size, 1, 1)
train_neg_prompt_embeds = neg_prompt_embed.repeat(config.train.batch_size, 1, 1)
# for some reason, autocast is necessary for non-lora training but for lora training it isn't necessary and it uses
# more memory
autocast = contextlib.nullcontext if config.use_lora else accelerator.autocast
# Prepare everything with our `accelerator`.
trainable_layers, optimizer = accelerator.prepare(trainable_layers, optimizer)
# executor to perform callbacks asynchronously.
executor = futures.ThreadPoolExecutor(max_workers=2)
# Train!
samples_per_epoch = config.sample.batch_size * accelerator.num_processes * config.sample.num_batches_per_epoch
total_train_batch_size = (
config.train.batch_size * accelerator.num_processes * config.train.gradient_accumulation_steps
)
logger.info("***** Running training *****")
logger.info(f" Num Epochs = {config.num_epochs}")
logger.info(f" Sample batch size per device = {config.sample.batch_size}")
logger.info(f" Train batch size per device = {config.train.batch_size}")
logger.info(f" Gradient Accumulation steps = {config.train.gradient_accumulation_steps}")
logger.info("")
logger.info(f" Total number of samples per epoch = {samples_per_epoch}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}")
logger.info(f" Number of gradient updates per inner epoch = {samples_per_epoch // total_train_batch_size}")
logger.info(f" Number of inner epochs = {config.train.num_inner_epochs}")
assert config.sample.batch_size >= config.train.batch_size
assert config.sample.batch_size % config.train.batch_size == 0
assert samples_per_epoch % total_train_batch_size == 0
if config.resume_from:
logger.info(f"Resuming from {config.resume_from}")
accelerator.load_state(config.resume_from)
first_epoch = int(config.resume_from.split("_")[-1]) + 1
else:
first_epoch = 0
global_step = 0
for epoch in range(first_epoch, config.num_epochs):
#################### SAMPLING ####################
pipeline.unet.eval()
samples = []
prompt_metadata = None
for i in tqdm(
range(config.sample.num_batches_per_epoch),
desc=f"Epoch {epoch}: sampling",
disable=not accelerator.is_local_main_process,
position=0,
):
# generate prompts
prompts1, prompt_metadata = zip(
*[prompt_fn(**config.prompt_fn_kwargs) for _ in range(config.sample.batch_size)]
)
prompts2 = prompts1
# encode prompts
prompt_ids1 = pipeline.tokenizer(
prompts1,
return_tensors="pt",
padding="max_length",
truncation=True,
max_length=pipeline.tokenizer.model_max_length,
).input_ids.to(accelerator.device)
prompt_ids2 = pipeline.tokenizer(
prompts2,
return_tensors="pt",
padding="max_length",
truncation=True,
max_length=pipeline.tokenizer.model_max_length,
).input_ids.to(accelerator.device)
prompt_embeds1 = pipeline.text_encoder(prompt_ids1)[0]
prompt_embeds2 = pipeline.text_encoder(prompt_ids2)[0]
# sample
with autocast(): | images1, _, latents1, log_probs1 = pipeline_with_logprob( | 0 | 2023-11-23 08:08:20+00:00 | 12k |
alexzhou907/DreamPropeller | threestudio/models/guidance/zero123_unified_guidance.py | [
{
"identifier": "Zero123Pipeline",
"path": "extern/zero123.py",
"snippet": "class Zero123Pipeline(DiffusionPipeline):\n r\"\"\"\n Pipeline to generate variations from an input image using Stable Diffusion.\n\n This model inherits from [`DiffusionPipeline`]. Check the superclass documentation fo... | import os
import random
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms.functional as TF
import threestudio
from contextlib import contextmanager
from dataclasses import dataclass, field
from diffusers import (
AutoencoderKL,
DDPMScheduler,
DPMSolverSinglestepScheduler,
UNet2DConditionModel,
)
from diffusers.loaders import AttnProcsLayers
from diffusers.models.attention_processor import LoRAAttnProcessor
from diffusers.models.embeddings import TimestepEmbedding
from PIL import Image
from tqdm import tqdm
from extern.zero123 import Zero123Pipeline
from threestudio.models.networks import ToDTypeWrapper
from threestudio.models.prompt_processors.base import PromptProcessorOutput
from threestudio.utils.base import BaseModule
from threestudio.utils.misc import C, cleanup, enable_gradient, parse_version
from threestudio.utils.typing import * | 10,515 |
# need to make sure the pipeline file is in path
sys.path.append("extern/")
pipe_kwargs = {
"safety_checker": None,
"requires_safety_checker": False,
"variant": "fp16" if self.cfg.half_precision_weights else None,
"torch_dtype": self.weights_dtype,
}
pipe = Zero123Pipeline.from_pretrained(
self.cfg.pretrained_model_name_or_path,
**pipe_kwargs,
).to(self.device)
self.prepare_pipe(pipe)
# phi network for VSD
# introduce two trainable modules:
# - self.camera_embedding
# - self.lora_layers
pipe_phi = None
# if the phi network shares the same unet with the pretrain network
# we need to pass additional cross attention kwargs to the unet
self.vsd_share_model = (
self.cfg.guidance_type == "vsd"
and self.cfg.vsd_phi_model_name_or_path is None
)
if self.cfg.guidance_type == "vsd":
if self.cfg.vsd_phi_model_name_or_path is None:
pipe_phi = pipe
else:
pipe_phi = Zero123Pipeline.from_pretrained(
self.cfg.vsd_phi_model_name_or_path,
**pipe_kwargs,
).to(self.device)
self.prepare_pipe(pipe_phi)
# set up camera embedding
if self.cfg.vsd_use_camera_condition:
if self.cfg.vsd_camera_condition_type in ["extrinsics", "mvp"]:
self.camera_embedding_dim = 16
elif self.cfg.vsd_camera_condition_type == "spherical":
self.camera_embedding_dim = 4
else:
raise ValueError("Invalid camera condition type!")
# FIXME: hard-coded output dim
self.camera_embedding = ToDTypeWrapper(
TimestepEmbedding(self.camera_embedding_dim, 1280),
self.weights_dtype,
).to(self.device)
pipe_phi.unet.class_embedding = self.camera_embedding
if self.cfg.vsd_use_lora:
# set up LoRA layers
lora_attn_procs = {}
for name in pipe_phi.unet.attn_processors.keys():
cross_attention_dim = (
None
if name.endswith("attn1.processor")
else pipe_phi.unet.config.cross_attention_dim
)
if name.startswith("mid_block"):
hidden_size = pipe_phi.unet.config.block_out_channels[-1]
elif name.startswith("up_blocks"):
block_id = int(name[len("up_blocks.")])
hidden_size = list(
reversed(pipe_phi.unet.config.block_out_channels)
)[block_id]
elif name.startswith("down_blocks"):
block_id = int(name[len("down_blocks.")])
hidden_size = pipe_phi.unet.config.block_out_channels[block_id]
lora_attn_procs[name] = LoRAAttnProcessor(
hidden_size=hidden_size, cross_attention_dim=cross_attention_dim
)
pipe_phi.unet.set_attn_processor(lora_attn_procs)
self.lora_layers = AttnProcsLayers(pipe_phi.unet.attn_processors).to(
self.device
)
self.lora_layers._load_state_dict_pre_hooks.clear()
self.lora_layers._state_dict_hooks.clear()
threestudio.info(f"Loaded Stable Diffusion!")
self.scheduler = DDPMScheduler.from_config(pipe.scheduler.config)
self.num_train_timesteps = self.scheduler.config.num_train_timesteps
# q(z_t|x) = N(alpha_t x, sigma_t^2 I)
# in DDPM, alpha_t = sqrt(alphas_cumprod_t), sigma_t^2 = 1 - alphas_cumprod_t
self.alphas_cumprod: Float[Tensor, "T"] = self.scheduler.alphas_cumprod.to(
self.device
)
self.alphas: Float[Tensor, "T"] = self.alphas_cumprod**0.5
self.sigmas: Float[Tensor, "T"] = (1 - self.alphas_cumprod) ** 0.5
# log SNR
self.lambdas: Float[Tensor, "T"] = self.sigmas / self.alphas
self._non_trainable_modules = NonTrainableModules(
pipe=pipe,
pipe_phi=pipe_phi,
)
# self.clip_image_embeddings and self.image_latents
self.prepare_image_embeddings()
@property
def pipe(self) -> Zero123Pipeline:
return self._non_trainable_modules.pipe
@property
def pipe_phi(self) -> Zero123Pipeline:
if self._non_trainable_modules.pipe_phi is None:
raise RuntimeError("phi model is not available.")
return self._non_trainable_modules.pipe_phi
def prepare_pipe(self, pipe: Zero123Pipeline):
|
@threestudio.register("zero123-unified-guidance")
class Zero123UnifiedGuidance(BaseModule):
@dataclass
class Config(BaseModule.Config):
# guidance type, in ["sds", "vsd"]
guidance_type: str = "sds"
pretrained_model_name_or_path: str = "bennyguo/zero123-diffusers"
guidance_scale: float = 5.0
weighting_strategy: str = "dreamfusion"
min_step_percent: Any = 0.02
max_step_percent: Any = 0.98
grad_clip: Optional[Any] = None
return_rgb_1step_orig: bool = False
return_rgb_multistep_orig: bool = False
n_rgb_multistep_orig_steps: int = 4
cond_image_path: str = ""
cond_elevation_deg: float = 0.0
cond_azimuth_deg: float = 0.0
cond_camera_distance: float = 1.2
# efficiency-related configurations
half_precision_weights: bool = True
# VSD configurations, only used when guidance_type is "vsd"
vsd_phi_model_name_or_path: Optional[str] = None
vsd_guidance_scale_phi: float = 1.0
vsd_use_lora: bool = True
vsd_lora_cfg_training: bool = False
vsd_lora_n_timestamp_samples: int = 1
vsd_use_camera_condition: bool = True
# camera condition type, in ["extrinsics", "mvp", "spherical"]
vsd_camera_condition_type: Optional[str] = "extrinsics"
cfg: Config
def configure(self) -> None:
self.min_step: Optional[int] = None
self.max_step: Optional[int] = None
self.grad_clip_val: Optional[float] = None
@dataclass
class NonTrainableModules:
pipe: Zero123Pipeline
pipe_phi: Optional[Zero123Pipeline] = None
self.weights_dtype = (
torch.float16 if self.cfg.half_precision_weights else torch.float32
)
threestudio.info(f"Loading Zero123 ...")
# need to make sure the pipeline file is in path
sys.path.append("extern/")
pipe_kwargs = {
"safety_checker": None,
"requires_safety_checker": False,
"variant": "fp16" if self.cfg.half_precision_weights else None,
"torch_dtype": self.weights_dtype,
}
pipe = Zero123Pipeline.from_pretrained(
self.cfg.pretrained_model_name_or_path,
**pipe_kwargs,
).to(self.device)
self.prepare_pipe(pipe)
# phi network for VSD
# introduce two trainable modules:
# - self.camera_embedding
# - self.lora_layers
pipe_phi = None
# if the phi network shares the same unet with the pretrain network
# we need to pass additional cross attention kwargs to the unet
self.vsd_share_model = (
self.cfg.guidance_type == "vsd"
and self.cfg.vsd_phi_model_name_or_path is None
)
if self.cfg.guidance_type == "vsd":
if self.cfg.vsd_phi_model_name_or_path is None:
pipe_phi = pipe
else:
pipe_phi = Zero123Pipeline.from_pretrained(
self.cfg.vsd_phi_model_name_or_path,
**pipe_kwargs,
).to(self.device)
self.prepare_pipe(pipe_phi)
# set up camera embedding
if self.cfg.vsd_use_camera_condition:
if self.cfg.vsd_camera_condition_type in ["extrinsics", "mvp"]:
self.camera_embedding_dim = 16
elif self.cfg.vsd_camera_condition_type == "spherical":
self.camera_embedding_dim = 4
else:
raise ValueError("Invalid camera condition type!")
# FIXME: hard-coded output dim
self.camera_embedding = ToDTypeWrapper(
TimestepEmbedding(self.camera_embedding_dim, 1280),
self.weights_dtype,
).to(self.device)
pipe_phi.unet.class_embedding = self.camera_embedding
if self.cfg.vsd_use_lora:
# set up LoRA layers
lora_attn_procs = {}
for name in pipe_phi.unet.attn_processors.keys():
cross_attention_dim = (
None
if name.endswith("attn1.processor")
else pipe_phi.unet.config.cross_attention_dim
)
if name.startswith("mid_block"):
hidden_size = pipe_phi.unet.config.block_out_channels[-1]
elif name.startswith("up_blocks"):
block_id = int(name[len("up_blocks.")])
hidden_size = list(
reversed(pipe_phi.unet.config.block_out_channels)
)[block_id]
elif name.startswith("down_blocks"):
block_id = int(name[len("down_blocks.")])
hidden_size = pipe_phi.unet.config.block_out_channels[block_id]
lora_attn_procs[name] = LoRAAttnProcessor(
hidden_size=hidden_size, cross_attention_dim=cross_attention_dim
)
pipe_phi.unet.set_attn_processor(lora_attn_procs)
self.lora_layers = AttnProcsLayers(pipe_phi.unet.attn_processors).to(
self.device
)
self.lora_layers._load_state_dict_pre_hooks.clear()
self.lora_layers._state_dict_hooks.clear()
threestudio.info(f"Loaded Stable Diffusion!")
self.scheduler = DDPMScheduler.from_config(pipe.scheduler.config)
self.num_train_timesteps = self.scheduler.config.num_train_timesteps
# q(z_t|x) = N(alpha_t x, sigma_t^2 I)
# in DDPM, alpha_t = sqrt(alphas_cumprod_t), sigma_t^2 = 1 - alphas_cumprod_t
self.alphas_cumprod: Float[Tensor, "T"] = self.scheduler.alphas_cumprod.to(
self.device
)
self.alphas: Float[Tensor, "T"] = self.alphas_cumprod**0.5
self.sigmas: Float[Tensor, "T"] = (1 - self.alphas_cumprod) ** 0.5
# log SNR
self.lambdas: Float[Tensor, "T"] = self.sigmas / self.alphas
self._non_trainable_modules = NonTrainableModules(
pipe=pipe,
pipe_phi=pipe_phi,
)
# self.clip_image_embeddings and self.image_latents
self.prepare_image_embeddings()
@property
def pipe(self) -> Zero123Pipeline:
return self._non_trainable_modules.pipe
@property
def pipe_phi(self) -> Zero123Pipeline:
if self._non_trainable_modules.pipe_phi is None:
raise RuntimeError("phi model is not available.")
return self._non_trainable_modules.pipe_phi
def prepare_pipe(self, pipe: Zero123Pipeline): | cleanup() | 5 | 2023-11-27 23:39:49+00:00 | 12k |
DAMO-NLP-SG/VCD | experiments/llava/model/language_model/mpt/modeling_mpt.py | [
{
"identifier": "attn_bias_shape",
"path": "experiments/llava/model/language_model/mpt/attention.py",
"snippet": "def attn_bias_shape(attn_impl, n_heads, seq_len, alibi, prefix_lm, causal, use_sequence_id):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n ... | import math
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List, Optional, Tuple, Union
from transformers import PreTrainedModel, PreTrainedTokenizer, PreTrainedTokenizerFast
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from .attention import attn_bias_shape, build_attn_bias
from .blocks import MPTBlock
from .custom_embedding import SharedEmbedding
from .norm import NORM_CLASS_REGISTRY
from .configuration_mpt import MPTConfig
from .adapt_tokenizer import AutoTokenizerForMOD, adapt_tokenizer_for_denoising
from .hf_prefixlm_converter import add_bidirectional_mask_if_missing, convert_hf_causal_lm_to_prefix_lm
from .meta_init_context import init_empty_weights
from .param_init_fns import MODEL_INIT_REGISTRY, generic_param_init_fn_
from .flash_attn_triton import flash_attn_func | 7,371 | """A simple, flexible implementation of a GPT model.
Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
try:
except:
pass
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
class MPTPreTrainedModel(PreTrainedModel):
config_class = MPTConfig
base_model_prefix = 'model'
_no_split_modules = ['MPTBlock']
class MPTModel(MPTPreTrainedModel):
def __init__(self, config: MPTConfig):
config._validate_config()
super().__init__(config)
self.attn_impl = config.attn_config['attn_impl']
self.prefix_lm = config.attn_config['prefix_lm']
self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
self.alibi = config.attn_config['alibi']
self.alibi_bias_max = config.attn_config['alibi_bias_max']
if config.init_device == 'mixed':
if dist.get_local_rank() == 0:
config.init_device = 'cpu'
else:
config.init_device = 'meta'
if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys())
raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).')
norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
self.embedding_fraction = config.embedding_fraction
self.wte = SharedEmbedding(config.vocab_size, config.d_model, device=config.init_device)
if not self.alibi:
self.wpe = torch.nn.Embedding(config.max_seq_len, config.d_model, device=config.init_device)
self.emb_drop = nn.Dropout(config.emb_pdrop)
self.blocks = nn.ModuleList([MPTBlock(device=config.init_device, **config.to_dict()) for _ in range(config.n_layers)])
self.norm_f = norm_class(config.d_model, device=config.init_device)
if config.init_device != 'meta':
print(f'You are using config.init_device={config.init_device!r}, but you can also use config.init_device="meta" with Composer + FSDP for fast initialization.')
self.apply(self.param_init_fn)
self.is_causal = not self.prefix_lm
self._attn_bias_initialized = False
self.attn_bias = None
self.attn_bias_shape = attn_bias_shape(self.attn_impl, config.n_heads, config.max_seq_len, self.alibi, prefix_lm=self.prefix_lm, causal=self.is_causal, use_sequence_id=self.attn_uses_sequence_id)
if config.no_bias:
for module in self.modules():
if hasattr(module, 'bias') and isinstance(module.bias, nn.Parameter):
if config.verbose:
warnings.warn(f'Removing bias ({module.bias}) from {module}.')
module.register_parameter('bias', None)
if config.verbose and config.verbose > 2:
print(self)
if 'verbose' not in self.config.init_config:
self.config.init_config['verbose'] = self.config.verbose
if self.config.init_config['verbose'] > 1:
init_fn_name = self.config.init_config['name']
warnings.warn(f'Using {init_fn_name} initialization.')
self.gradient_checkpointing = False
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, value):
self.wte = value
@torch.no_grad()
def _attn_bias(self, device, dtype, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None):
if not self._attn_bias_initialized:
if self.attn_bias_shape:
self.attn_bias = torch.zeros(self.attn_bias_shape, device=device, dtype=dtype)
| """A simple, flexible implementation of a GPT model.
Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
try:
except:
pass
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
class MPTPreTrainedModel(PreTrainedModel):
config_class = MPTConfig
base_model_prefix = 'model'
_no_split_modules = ['MPTBlock']
class MPTModel(MPTPreTrainedModel):
def __init__(self, config: MPTConfig):
config._validate_config()
super().__init__(config)
self.attn_impl = config.attn_config['attn_impl']
self.prefix_lm = config.attn_config['prefix_lm']
self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
self.alibi = config.attn_config['alibi']
self.alibi_bias_max = config.attn_config['alibi_bias_max']
if config.init_device == 'mixed':
if dist.get_local_rank() == 0:
config.init_device = 'cpu'
else:
config.init_device = 'meta'
if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys())
raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).')
norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
self.embedding_fraction = config.embedding_fraction
self.wte = SharedEmbedding(config.vocab_size, config.d_model, device=config.init_device)
if not self.alibi:
self.wpe = torch.nn.Embedding(config.max_seq_len, config.d_model, device=config.init_device)
self.emb_drop = nn.Dropout(config.emb_pdrop)
self.blocks = nn.ModuleList([MPTBlock(device=config.init_device, **config.to_dict()) for _ in range(config.n_layers)])
self.norm_f = norm_class(config.d_model, device=config.init_device)
if config.init_device != 'meta':
print(f'You are using config.init_device={config.init_device!r}, but you can also use config.init_device="meta" with Composer + FSDP for fast initialization.')
self.apply(self.param_init_fn)
self.is_causal = not self.prefix_lm
self._attn_bias_initialized = False
self.attn_bias = None
self.attn_bias_shape = attn_bias_shape(self.attn_impl, config.n_heads, config.max_seq_len, self.alibi, prefix_lm=self.prefix_lm, causal=self.is_causal, use_sequence_id=self.attn_uses_sequence_id)
if config.no_bias:
for module in self.modules():
if hasattr(module, 'bias') and isinstance(module.bias, nn.Parameter):
if config.verbose:
warnings.warn(f'Removing bias ({module.bias}) from {module}.')
module.register_parameter('bias', None)
if config.verbose and config.verbose > 2:
print(self)
if 'verbose' not in self.config.init_config:
self.config.init_config['verbose'] = self.config.verbose
if self.config.init_config['verbose'] > 1:
init_fn_name = self.config.init_config['name']
warnings.warn(f'Using {init_fn_name} initialization.')
self.gradient_checkpointing = False
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, value):
self.wte = value
@torch.no_grad()
def _attn_bias(self, device, dtype, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None):
if not self._attn_bias_initialized:
if self.attn_bias_shape:
self.attn_bias = torch.zeros(self.attn_bias_shape, device=device, dtype=dtype) | self.attn_bias = build_attn_bias(self.attn_impl, self.attn_bias, self.config.n_heads, self.config.max_seq_len, causal=self.is_causal, alibi=self.alibi, alibi_bias_max=self.alibi_bias_max) | 1 | 2023-11-26 12:34:31+00:00 | 12k |
CineMingle/CineMingle | Movie_Data_Capture.py | [
{
"identifier": "get_data_from_json",
"path": "scraper.py",
"snippet": "def get_data_from_json(\n file_number: str,\n open_cc: opencc.OpenCC,\n specified_source: str, specified_url: str) -> typing.Optional[dict]:\n \n # iterate through all services and fetch the data 从网站上查询片名解... | import argparse
import json
import os
import random
import re
import sys
import time
import shutil
import typing
import urllib3
import signal
import platform
import config
from datetime import datetime, timedelta
from lxml import etree
from pathlib import Path
from opencc import OpenCC
from scraper import get_data_from_json
from ADC_function import file_modification_days, get_html, parallel_download_files
from number_parser import get_number
from core import core_main, core_main_no_net_op, moveFailedFolder, debug_print | 9,816 |
def check_update(local_version):
"""
Check for updates by comparing the local version of the application with the latest version available on GitHub.
It fetches the latest release information from GitHub and compares the version numbers.
If a new version is available, it prints out the update information.
:param local_version: The current local version of the application.
"""
|
def check_update(local_version):
"""
Check for updates by comparing the local version of the application with the latest version available on GitHub.
It fetches the latest release information from GitHub and compares the version numbers.
If a new version is available, it prints out the update information.
:param local_version: The current local version of the application.
""" | htmlcode = get_html("https://api.github.com/repos/CineMingle/CineMingle/releases/latest") | 2 | 2023-11-25 03:16:13+00:00 | 12k |
crystallee-ai/controlGIF | animatediff/models/unet.py | [
{
"identifier": "CrossAttnDownBlock3D",
"path": "animatediff/models/unet_blocks.py",
"snippet": "class CrossAttnDownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n num_layers... | import sys
import os
import json
import torch
import torch.nn as nn
import torch.utils.checkpoint
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.loaders import UNet2DConditionLoadersMixin
from diffusers.utils import BaseOutput, logging
from diffusers.models.attention_processor import (
ADDED_KV_ATTENTION_PROCESSORS,
CROSS_ATTENTION_PROCESSORS,
AttentionProcessor,
AttnAddedKVProcessor,
AttnProcessor,
)
from diffusers.models.embeddings import TimestepEmbedding, Timesteps
from diffusers.models.modeling_utils import ModelMixin
from animatediff.models.unet_blocks import (
CrossAttnDownBlock3D,
CrossAttnUpBlock3D,
DownBlock3D,
UNetMidBlock3DCrossAttn,
UpBlock3D,
get_down_block,
get_up_block,
)
from .resnet import InflatedConv3d, InflatedGroupNorm
from diffusers.utils import WEIGHTS_NAME | 7,713 | in_channels: int = 4,
out_channels: int = 4,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"DownBlock3D",
),
#-----
mid_block_type: str = "UnetMidBlock3DCrossAttn",
#-----
up_block_types: Tuple[str] = (
"UpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D"
),
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
act_fn: str = "silu",
norm_num_groups: Optional[int] = 32,
norm_eps: float = 1e-5,
# cross_attention_dim: int = 1024,
cross_attention_dim: int = 1280,
# attention_head_dim: Union[int, Tuple[int]] = 64,
attention_head_dim: Union[int, Tuple[int]] = 8,
num_attention_heads: Optional[Union[int, Tuple[int]]] = None,
use_inflated_groupnorm=False,
# Additional
use_motion_module = False,
motion_module_resolutions = ( 1,2,4,8 ),
motion_module_mid_block = False,
motion_module_decoder_only = False,
motion_module_type = None,
motion_module_kwargs = {},
unet_use_cross_frame_attention = None,
unet_use_temporal_attention = None,
):
super().__init__()
self.sample_size = sample_size
# time_embed_dim = block_out_channels[0] * 4
if num_attention_heads is not None:
raise NotImplementedError(
"At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
)
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
num_attention_heads = num_attention_heads or attention_head_dim
# Check inputs
if len(down_block_types) != len(up_block_types):
raise ValueError(
f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}."
)
if len(block_out_channels) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
)
if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}."
)
# input
conv_in_kernel = 3
conv_out_kernel = 3
conv_in_padding = (conv_in_kernel - 1) // 2
# self.conv_in = nn.Conv2d(
# in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
# )
self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))
# time
time_embed_dim = block_out_channels[0] * 4
self.time_proj = Timesteps(block_out_channels[0], True, 0)
timestep_input_dim = block_out_channels[0]
self.time_embedding = TimestepEmbedding(
timestep_input_dim,
time_embed_dim,
act_fn=act_fn,
)
# self.transformer_in = TransformerTemporalModel(
# num_attention_heads=8,
# attention_head_dim=attention_head_dim,
# in_channels=block_out_channels[0],
# num_layers=1,
# )
# class embedding
self.down_blocks = nn.ModuleList([])
self.mid_block = None
self.up_blocks = nn.ModuleList([])
if isinstance(num_attention_heads, int):
num_attention_heads = (num_attention_heads,) * len(down_block_types)
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
res = 2 ** i
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
| sys.path.append("/root/autodl-tmp/code/animatediff/modelshigh")
# from diffusers.models.transformer_temporal import TransformerTemporalModel
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class UNet3DConditionOutput(BaseOutput):
"""
The output of [`UNet3DConditionModel`].
Args:
sample (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`):
The hidden states output conditioned on `encoder_hidden_states` input. Output of last layer of model.
"""
sample: torch.FloatTensor
class UNet3DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin):
_supports_gradient_checkpointing = False
@register_to_config
def __init__(
self,
sample_size: Optional[int] = None,
in_channels: int = 4,
out_channels: int = 4,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"DownBlock3D",
),
#-----
mid_block_type: str = "UnetMidBlock3DCrossAttn",
#-----
up_block_types: Tuple[str] = (
"UpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D"
),
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
act_fn: str = "silu",
norm_num_groups: Optional[int] = 32,
norm_eps: float = 1e-5,
# cross_attention_dim: int = 1024,
cross_attention_dim: int = 1280,
# attention_head_dim: Union[int, Tuple[int]] = 64,
attention_head_dim: Union[int, Tuple[int]] = 8,
num_attention_heads: Optional[Union[int, Tuple[int]]] = None,
use_inflated_groupnorm=False,
# Additional
use_motion_module = False,
motion_module_resolutions = ( 1,2,4,8 ),
motion_module_mid_block = False,
motion_module_decoder_only = False,
motion_module_type = None,
motion_module_kwargs = {},
unet_use_cross_frame_attention = None,
unet_use_temporal_attention = None,
):
super().__init__()
self.sample_size = sample_size
# time_embed_dim = block_out_channels[0] * 4
if num_attention_heads is not None:
raise NotImplementedError(
"At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
)
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
num_attention_heads = num_attention_heads or attention_head_dim
# Check inputs
if len(down_block_types) != len(up_block_types):
raise ValueError(
f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}."
)
if len(block_out_channels) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
)
if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}."
)
# input
conv_in_kernel = 3
conv_out_kernel = 3
conv_in_padding = (conv_in_kernel - 1) // 2
# self.conv_in = nn.Conv2d(
# in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
# )
self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))
# time
time_embed_dim = block_out_channels[0] * 4
self.time_proj = Timesteps(block_out_channels[0], True, 0)
timestep_input_dim = block_out_channels[0]
self.time_embedding = TimestepEmbedding(
timestep_input_dim,
time_embed_dim,
act_fn=act_fn,
)
# self.transformer_in = TransformerTemporalModel(
# num_attention_heads=8,
# attention_head_dim=attention_head_dim,
# in_channels=block_out_channels[0],
# num_layers=1,
# )
# class embedding
self.down_blocks = nn.ModuleList([])
self.mid_block = None
self.up_blocks = nn.ModuleList([])
if isinstance(num_attention_heads, int):
num_attention_heads = (num_attention_heads,) * len(down_block_types)
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
res = 2 ** i
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
| down_block = get_down_block( | 5 | 2023-11-25 07:43:32+00:00 | 12k |
amazon-science/instruct-video-to-video | modules/video_unet_temporal/unet.py | [
{
"identifier": "CrossAttnDownBlock3D",
"path": "modules/video_unet_temporal/unet_blocks.py",
"snippet": "class CrossAttnDownBlock3D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n n... | from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.models.modeling_utils import ModelMixin
from diffusers.utils import BaseOutput, logging
from diffusers.models.embeddings import TimestepEmbedding, Timesteps
from .unet_blocks import (
CrossAttnDownBlock3D,
CrossAttnUpBlock3D,
DownBlock3D,
UNetMidBlock3DCrossAttn,
UpBlock3D,
get_down_block,
get_up_block,
)
from .resnet import InflatedConv3d
import os
import json
import torch
import torch.nn as nn
import torch.utils.checkpoint | 8,589 |
# count how many layers upsample the videos
self.num_upsamplers = 0
# up
reversed_block_out_channels = list(reversed(block_out_channels))
reversed_attention_head_dim = list(reversed(attention_head_dim))
only_cross_attention = list(reversed(only_cross_attention))
output_channel = reversed_block_out_channels[0]
for i, up_block_type in enumerate(up_block_types):
res = 2 ** (3 - i)
is_final_block = i == len(block_out_channels) - 1
prev_output_channel = output_channel
output_channel = reversed_block_out_channels[i]
input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
# add upsample block for all BUT final layer
if not is_final_block:
add_upsample = True
self.num_upsamplers += 1
else:
add_upsample = False
up_block = get_up_block(
up_block_type,
num_layers=layers_per_block + 1,
in_channels=input_channel,
out_channels=output_channel,
prev_output_channel=prev_output_channel,
temb_channels=time_embed_dim,
add_upsample=add_upsample,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=reversed_attention_head_dim[i],
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
use_motion_module=use_motion_module and (res in motion_module_resolutions),
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
self.up_blocks.append(up_block)
prev_output_channel = output_channel
# out
self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)
self.conv_act = nn.SiLU()
self.conv_out = InflatedConv3d(block_out_channels[0], out_channels, kernel_size=3, padding=1)
def set_attention_slice(self, slice_size):
r"""
Enable sliced attention computation.
When this option is enabled, the attention module will split the input tensor in slices, to compute attention
in several steps. This is useful to save some memory in exchange for a small speed decrease.
Args:
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
`"max"`, maxium amount of memory will be saved by running only one slice at a time. If a number is
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
must be a multiple of `slice_size`.
"""
sliceable_head_dims = []
def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):
if hasattr(module, "set_attention_slice"):
sliceable_head_dims.append(module.sliceable_head_dim)
for child in module.children():
fn_recursive_retrieve_slicable_dims(child)
# retrieve number of attention layers
for module in self.children():
fn_recursive_retrieve_slicable_dims(module)
num_slicable_layers = len(sliceable_head_dims)
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = [dim // 2 for dim in sliceable_head_dims]
elif slice_size == "max":
# make smallest slice possible
slice_size = num_slicable_layers * [1]
slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
if len(slice_size) != len(sliceable_head_dims):
raise ValueError(
f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
)
for i in range(len(slice_size)):
size = slice_size[i]
dim = sliceable_head_dims[i]
if size is not None and size > dim:
raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
# Recursively walk through all the children.
# Any children which exposes the set_attention_slice method
# gets the message
def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
if hasattr(module, "set_attention_slice"):
module.set_attention_slice(slice_size.pop())
for child in module.children():
fn_recursive_set_attention_slice(child, slice_size)
reversed_slice_size = list(reversed(slice_size))
for module in self.children():
fn_recursive_set_attention_slice(module, reversed_slice_size)
def _set_gradient_checkpointing(self, module, value=False):
| # Adapted from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/unet_2d_condition.py
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class UNet3DConditionOutput(BaseOutput):
sample: torch.FloatTensor
class UNet3DConditionModel(ModelMixin, ConfigMixin):
_supports_gradient_checkpointing = True
@register_to_config
def __init__(
self,
sample_size: Optional[int] = None,
in_channels: int = 4,
out_channels: int = 4,
center_input_sample: bool = False,
flip_sin_to_cos: bool = True,
freq_shift: int = 0,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"CrossAttnDownBlock3D",
"DownBlock3D",
),
mid_block_type: str = "UNetMidBlock3DCrossAttn",
up_block_types: Tuple[str] = (
"UpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D",
"CrossAttnUpBlock3D"
),
only_cross_attention: Union[bool, Tuple[bool]] = False,
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: int = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
act_fn: str = "silu",
norm_num_groups: int = 32,
norm_eps: float = 1e-5,
cross_attention_dim: int = 1280,
attention_head_dim: Union[int, Tuple[int]] = 8,
dual_cross_attention: bool = False,
use_linear_projection: bool = False,
class_embed_type: Optional[str] = None,
num_class_embeds: Optional[int] = None,
upcast_attention: bool = False,
resnet_time_scale_shift: str = "default",
# Additional
use_motion_module = True,
motion_module_resolutions = ( 1,2,4,8 ),
motion_module_mid_block = True,
motion_module_decoder_only = False,
motion_module_type = 'Vanilla',
motion_module_kwargs = {},
):
super().__init__()
self.sample_size = sample_size
time_embed_dim = block_out_channels[0] * 4
# input
self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))
# time
self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
timestep_input_dim = block_out_channels[0]
self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
# class embedding
if class_embed_type is None and num_class_embeds is not None:
self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
elif class_embed_type == "timestep":
self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)
elif class_embed_type == "identity":
self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
else:
self.class_embedding = None
self.down_blocks = nn.ModuleList([])
self.mid_block = None
self.up_blocks = nn.ModuleList([])
if isinstance(only_cross_attention, bool):
only_cross_attention = [only_cross_attention] * len(down_block_types)
if isinstance(attention_head_dim, int):
attention_head_dim = (attention_head_dim,) * len(down_block_types)
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
res = 2 ** i
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
down_block = get_down_block(
down_block_type,
num_layers=layers_per_block,
in_channels=input_channel,
out_channels=output_channel,
temb_channels=time_embed_dim,
add_downsample=not is_final_block,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attention_head_dim[i],
downsample_padding=downsample_padding,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
use_motion_module=use_motion_module and (res in motion_module_resolutions) and (not motion_module_decoder_only),
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
self.down_blocks.append(down_block)
# mid
if mid_block_type == "UNetMidBlock3DCrossAttn":
self.mid_block = UNetMidBlock3DCrossAttn(
in_channels=block_out_channels[-1],
temb_channels=time_embed_dim,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
output_scale_factor=mid_block_scale_factor,
resnet_time_scale_shift=resnet_time_scale_shift,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=attention_head_dim[-1],
resnet_groups=norm_num_groups,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
upcast_attention=upcast_attention,
use_motion_module=use_motion_module and motion_module_mid_block,
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
else:
raise ValueError(f"unknown mid_block_type : {mid_block_type}")
# count how many layers upsample the videos
self.num_upsamplers = 0
# up
reversed_block_out_channels = list(reversed(block_out_channels))
reversed_attention_head_dim = list(reversed(attention_head_dim))
only_cross_attention = list(reversed(only_cross_attention))
output_channel = reversed_block_out_channels[0]
for i, up_block_type in enumerate(up_block_types):
res = 2 ** (3 - i)
is_final_block = i == len(block_out_channels) - 1
prev_output_channel = output_channel
output_channel = reversed_block_out_channels[i]
input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
# add upsample block for all BUT final layer
if not is_final_block:
add_upsample = True
self.num_upsamplers += 1
else:
add_upsample = False
up_block = get_up_block(
up_block_type,
num_layers=layers_per_block + 1,
in_channels=input_channel,
out_channels=output_channel,
prev_output_channel=prev_output_channel,
temb_channels=time_embed_dim,
add_upsample=add_upsample,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=reversed_attention_head_dim[i],
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
use_motion_module=use_motion_module and (res in motion_module_resolutions),
motion_module_type=motion_module_type,
motion_module_kwargs=motion_module_kwargs,
)
self.up_blocks.append(up_block)
prev_output_channel = output_channel
# out
self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)
self.conv_act = nn.SiLU()
self.conv_out = InflatedConv3d(block_out_channels[0], out_channels, kernel_size=3, padding=1)
def set_attention_slice(self, slice_size):
r"""
Enable sliced attention computation.
When this option is enabled, the attention module will split the input tensor in slices, to compute attention
in several steps. This is useful to save some memory in exchange for a small speed decrease.
Args:
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
`"max"`, maxium amount of memory will be saved by running only one slice at a time. If a number is
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
must be a multiple of `slice_size`.
"""
sliceable_head_dims = []
def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):
if hasattr(module, "set_attention_slice"):
sliceable_head_dims.append(module.sliceable_head_dim)
for child in module.children():
fn_recursive_retrieve_slicable_dims(child)
# retrieve number of attention layers
for module in self.children():
fn_recursive_retrieve_slicable_dims(module)
num_slicable_layers = len(sliceable_head_dims)
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = [dim // 2 for dim in sliceable_head_dims]
elif slice_size == "max":
# make smallest slice possible
slice_size = num_slicable_layers * [1]
slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
if len(slice_size) != len(sliceable_head_dims):
raise ValueError(
f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
)
for i in range(len(slice_size)):
size = slice_size[i]
dim = sliceable_head_dims[i]
if size is not None and size > dim:
raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
# Recursively walk through all the children.
# Any children which exposes the set_attention_slice method
# gets the message
def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
if hasattr(module, "set_attention_slice"):
module.set_attention_slice(slice_size.pop())
for child in module.children():
fn_recursive_set_attention_slice(child, slice_size)
reversed_slice_size = list(reversed(slice_size))
for module in self.children():
fn_recursive_set_attention_slice(module, reversed_slice_size)
def _set_gradient_checkpointing(self, module, value=False): | if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)): | 4 | 2023-11-25 06:00:08+00:00 | 12k |
abdulhaim/LMRL-Gym | llm_rl_scripts/maze/ilql/eval_ilql.py | [
{
"identifier": "text_env_eval",
"path": "LLM_RL/environment.py",
"snippet": "def text_env_eval(\n env: Union[TextEnv, BatchedTextEnv], \n policy: Union[TextPolicy, BatchedTextPolicy], \n n_rollouts: int, \n initial_text_history: Optional[TextHistory]=None, # only allow one initial_text_hist... | from typing import Optional
from JaxSeq.bucket_manager import open_with_bucket as open
from transformers import AutoTokenizer
from JaxSeq.utils import convert_path, load_mesh, create_path
from JaxSeq.utils import BlockingStrategy, Padding, Truncation
from JaxSeq.models.gpt2.interface import GPT2InferenceMask
from JaxSeq.models.gpt2.load import ModelLoadMode, load_params
from transformers.generation import GenerationConfig
from LLM_RL.environment import text_env_eval
from llm_rl_scripts.maze.env.maze_utils import setup_maze_env, maze_solver
from collections import defaultdict
from LLM_RL.algorithms.ppo.reranker_policy import ReRankerSamplePolicy, ReRankerPolicy
from llm_rl_scripts.maze.env.env import maze_proposal_function
from flax.traverse_util import flatten_dict, unflatten_dict
from LLM_RL.environment import Text
from llm_rl_scripts.maze.env.env import describe_observation_give_position
from LLM_RL.algorithms.value_rl_base.gpt2.interface import GPT2ValuePolicy, GPT2ValueRLInference
from LLM_RL.heads.mlp_head import load_params as load_head_params
from LLM_RL.algorithms.ilql.gpt2.score_fn import build_ilql_score_fn
import tyro
import jax
import jax.numpy as jnp
import os
import pickle as pkl
import json
import numpy as np | 8,557 | policy_temperature: Optional[float]=None,
policy_top_p: Optional[float]=None,
policy_top_k: Optional[int]=None,
policy_beta: float=16.0,
maze_name:str="double_t_maze",
describe_function:str="describe_observation_give_position",
maze_last_k: int=1,
maze_reward_function: str="standard_reward",
do_accuracy_eval: bool=True,
do_reward_eval: bool=True,
use_reranker_for_reward_eval: bool=False,
force_pad_embeddings: bool=False,
):
assert model_load_mode != ModelLoadMode.HF
input_args = locals()
print(input_args)
tokenizer = AutoTokenizer.from_pretrained('gpt2')
tokenizer.add_special_tokens({'pad_token': '<|pad|>'})
mesh = load_mesh((data_mesh_shape, fsdp_mesh_shape, model_mesh_shape), ('dp', 'fsdp', 'mp'))
is_main_process = jax.process_index() == 0
print(f"Mesh: {mesh}")
print(f"Is main process: {is_main_process}")
env = setup_maze_env(
maze_name=maze_name,
describe_function=describe_function,
reward_function=maze_reward_function,
last_k=maze_last_k,
)
possible_positions = list(zip(*np.where(env.maze==0)))
for goal in env.valid_goals:
possible_positions.remove(tuple(goal.tolist()))
optimal_policy = maze_solver(1-env.maze, list(map(tuple, env.valid_goals.tolist())))
pi_beta_prng_key = jax.random.PRNGKey(0)
pi_beta_params, _ = load_params(
model_load_mode=pi_beta_load_mode,
model_load_path=convert_path(pi_beta_load_path) if pi_beta_load_mode != ModelLoadMode.HF else pi_beta_load_path,
model_dtype=jnp.bfloat16 if bf16_activations else jnp.float32,
tokenizer=tokenizer,
mesh=mesh,
prng_key=pi_beta_prng_key,
force_pad_embeddings=force_pad_embeddings,
params_dtype=jnp.float32,
)
base_prng_key = jax.random.PRNGKey(0)
base_params, base_model = load_params(
model_load_mode=model_load_mode,
model_load_path=convert_path(os.path.join(model_load_path, 'base')),
model_dtype=jnp.bfloat16 if bf16_activations else jnp.float32,
tokenizer=tokenizer,
mesh=mesh,
prng_key=base_prng_key,
force_pad_embeddings=force_pad_embeddings,
params_dtype=jnp.float32,
)
q1_head_params, q_head = load_head_params(
model_load_mode=model_load_mode.value,
model_load_path=convert_path(os.path.join(model_load_path, 'q1_head')),
model_dtype=jnp.bfloat16 if bf16_activations else jnp.float32,
mesh=mesh,
prng_key=jax.random.PRNGKey(0),
pad_to_output_dim=None,
params_dtype=jnp.float32,
)
q2_head_params, _ = load_head_params(
model_load_mode=model_load_mode.value,
model_load_path=convert_path(os.path.join(model_load_path, 'q2_head')),
model_dtype=jnp.bfloat16 if bf16_activations else jnp.float32,
mesh=mesh,
prng_key=jax.random.PRNGKey(0),
pad_to_output_dim=None,
params_dtype=jnp.float32,
)
v_head_params, v_head = load_head_params(
model_load_mode=model_load_mode.value,
model_load_path=convert_path(os.path.join(model_load_path, 'v_head')),
model_dtype=jnp.bfloat16 if bf16_activations else jnp.float32,
mesh=mesh,
prng_key=jax.random.PRNGKey(0),
pad_to_output_dim=None,
params_dtype=jnp.float32,
)
inference = GPT2ValueRLInference.load_inference(
pi_beta_params=pi_beta_params,
base_params=base_params,
q1_head_params=q1_head_params,
q2_head_params=q2_head_params,
v_head_params=v_head_params,
pi_beta_model=base_model,
base_model=base_model,
q_head_model=q_head,
v_head_model=v_head,
tokenizer=tokenizer,
beta=policy_beta,
dp_shard_logits=True,
)
policy_prng = jax.random.PRNGKey(0)
def evaluator(inference: GPT2InferenceMask):
nonlocal policy_prng
policy_prng, new_key = jax.random.split(policy_prng)
all_results = dict()
interactions = dict()
if do_reward_eval:
if use_reranker_for_reward_eval:
if policy_do_sample:
policy = ReRankerSamplePolicy(
|
def main(
model_load_mode: ModelLoadMode,
model_load_path: str,
pi_beta_load_mode: ModelLoadMode,
pi_beta_load_path: str,
/, # Mark the end of positional arguments.
outputs_path: Optional[str]=None,
data_mesh_shape: int=1,
fsdp_mesh_shape: int=1,
model_mesh_shape: int=-1,
bf16_activations: bool=False,
policy_n_rollouts: int=32,
policy_bsize: int=1,
policy_max_input_length: int=256,
policy_max_output_length: int=256,
policy_do_sample: bool=True,
policy_num_beams: int=1,
policy_temperature: Optional[float]=None,
policy_top_p: Optional[float]=None,
policy_top_k: Optional[int]=None,
policy_beta: float=16.0,
maze_name:str="double_t_maze",
describe_function:str="describe_observation_give_position",
maze_last_k: int=1,
maze_reward_function: str="standard_reward",
do_accuracy_eval: bool=True,
do_reward_eval: bool=True,
use_reranker_for_reward_eval: bool=False,
force_pad_embeddings: bool=False,
):
assert model_load_mode != ModelLoadMode.HF
input_args = locals()
print(input_args)
tokenizer = AutoTokenizer.from_pretrained('gpt2')
tokenizer.add_special_tokens({'pad_token': '<|pad|>'})
mesh = load_mesh((data_mesh_shape, fsdp_mesh_shape, model_mesh_shape), ('dp', 'fsdp', 'mp'))
is_main_process = jax.process_index() == 0
print(f"Mesh: {mesh}")
print(f"Is main process: {is_main_process}")
env = setup_maze_env(
maze_name=maze_name,
describe_function=describe_function,
reward_function=maze_reward_function,
last_k=maze_last_k,
)
possible_positions = list(zip(*np.where(env.maze==0)))
for goal in env.valid_goals:
possible_positions.remove(tuple(goal.tolist()))
optimal_policy = maze_solver(1-env.maze, list(map(tuple, env.valid_goals.tolist())))
pi_beta_prng_key = jax.random.PRNGKey(0)
pi_beta_params, _ = load_params(
model_load_mode=pi_beta_load_mode,
model_load_path=convert_path(pi_beta_load_path) if pi_beta_load_mode != ModelLoadMode.HF else pi_beta_load_path,
model_dtype=jnp.bfloat16 if bf16_activations else jnp.float32,
tokenizer=tokenizer,
mesh=mesh,
prng_key=pi_beta_prng_key,
force_pad_embeddings=force_pad_embeddings,
params_dtype=jnp.float32,
)
base_prng_key = jax.random.PRNGKey(0)
base_params, base_model = load_params(
model_load_mode=model_load_mode,
model_load_path=convert_path(os.path.join(model_load_path, 'base')),
model_dtype=jnp.bfloat16 if bf16_activations else jnp.float32,
tokenizer=tokenizer,
mesh=mesh,
prng_key=base_prng_key,
force_pad_embeddings=force_pad_embeddings,
params_dtype=jnp.float32,
)
q1_head_params, q_head = load_head_params(
model_load_mode=model_load_mode.value,
model_load_path=convert_path(os.path.join(model_load_path, 'q1_head')),
model_dtype=jnp.bfloat16 if bf16_activations else jnp.float32,
mesh=mesh,
prng_key=jax.random.PRNGKey(0),
pad_to_output_dim=None,
params_dtype=jnp.float32,
)
q2_head_params, _ = load_head_params(
model_load_mode=model_load_mode.value,
model_load_path=convert_path(os.path.join(model_load_path, 'q2_head')),
model_dtype=jnp.bfloat16 if bf16_activations else jnp.float32,
mesh=mesh,
prng_key=jax.random.PRNGKey(0),
pad_to_output_dim=None,
params_dtype=jnp.float32,
)
v_head_params, v_head = load_head_params(
model_load_mode=model_load_mode.value,
model_load_path=convert_path(os.path.join(model_load_path, 'v_head')),
model_dtype=jnp.bfloat16 if bf16_activations else jnp.float32,
mesh=mesh,
prng_key=jax.random.PRNGKey(0),
pad_to_output_dim=None,
params_dtype=jnp.float32,
)
inference = GPT2ValueRLInference.load_inference(
pi_beta_params=pi_beta_params,
base_params=base_params,
q1_head_params=q1_head_params,
q2_head_params=q2_head_params,
v_head_params=v_head_params,
pi_beta_model=base_model,
base_model=base_model,
q_head_model=q_head,
v_head_model=v_head,
tokenizer=tokenizer,
beta=policy_beta,
dp_shard_logits=True,
)
policy_prng = jax.random.PRNGKey(0)
def evaluator(inference: GPT2InferenceMask):
nonlocal policy_prng
policy_prng, new_key = jax.random.split(policy_prng)
all_results = dict()
interactions = dict()
if do_reward_eval:
if use_reranker_for_reward_eval:
if policy_do_sample:
policy = ReRankerSamplePolicy( | proposal_fn=maze_proposal_function, | 5 | 2023-11-21 00:16:42+00:00 | 12k |
jzmzhong/Automatic-Prosody-Annotator-with-SSWP-CLAP | src/clap_module/factory.py | [
{
"identifier": "CLAP",
"path": "src/clap_module/model.py",
"snippet": "class CLAP(nn.Module):\r\n def __init__(\r\n self,\r\n args,\r\n joint_embed_shape: int,\r\n audio_cfg: CLAPAudioCfp,\r\n text_cfg: CLAPTextCfg,\r\n enable_fusion:... | import json
import logging
import os
import re
import torch
from copy import deepcopy
from pathlib import Path
from .model import CLAP, convert_weights_to_fp16
| 7,879 | checkpoint_path (str): checkpoint path
map_location (str, optional): a function, :class:`torch.device`, string or a dict specifying how to
remap storage locations. Defaults to "cpu".
skip_params (bool, optional): Remove the module from the key field. Defaults to True.
Returns:
state_dict (dict): model state dict
"""
checkpoint = torch.load(checkpoint_path, map_location=map_location)
if isinstance(checkpoint, dict) and "state_dict" in checkpoint:
state_dict = checkpoint["state_dict"]
else:
state_dict = checkpoint
if skip_params:
if next(iter(state_dict.items()))[0].startswith("module"):
state_dict = {k[7:]: v for k, v in state_dict.items()}
return state_dict
def create_model(
args,
model_name: str,
precision: str = "fp32",
device: torch.device = torch.device("cpu"),
jit: bool = False,
):
"""Create a CLAP model from a model config.
Args:
args (argparse.Namespace): Command-line arguments.
model_name (str): model name
precision (str, optional): Model parameter accuracy. Defaults to "fp32".
device (torch.device, optional): device. Defaults to torch.device("cpu").
jit (bool, optional): torch.jit.script operations. Defaults to False.
Returns:
model (nn.Module): CLAP model
model_cfg (dict): model config
"""
if model_name in _MODEL_CONFIGS:
logging.info(f"Loading {model_name} model config.")
model_cfg = deepcopy(_MODEL_CONFIGS[model_name])
else:
logging.error(
f"Model config for {model_name} not found; available models {list_models()}."
)
raise RuntimeError(f"Model config for {model_name} not found.")
model = CLAP(args, **model_cfg)
# load pretrained CLAP model
if args.pretrained:
pretrained_clap = torch.load(args.pretrained, map_location='cpu')
model.load_state_dict(pretrained_clap["state_dict"], strict=False)
logging.info(f"Loaded pretrained CLAP model weights !!!")
else:
# load pretrained audio encoder
pretrained_audio = model_cfg["audio_cfg"]["pretrained_audio"]
amodel_type = model_cfg["audio_cfg"]["model_type"]
if pretrained_audio:
if amodel_type.startswith('PANN'):
if 'Cnn14_mAP' in pretrained_audio: # official checkpoint
audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
audio_ckpt = audio_ckpt['model']
keys = list(audio_ckpt.keys())
for key in keys:
if 'spectrogram_extractor' not in key and 'logmel_extractor' not in key:
v = audio_ckpt.pop(key)
audio_ckpt['audio_branch.' + key] = v
# checkpoint trained via HTSAT codebase
elif os.path.basename(pretrained_audio).startswith('PANN'):
audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
audio_ckpt = audio_ckpt['state_dict']
keys = list(audio_ckpt.keys())
for key in keys:
if key.startswith('sed_model'):
v = audio_ckpt.pop(key)
audio_ckpt['audio_branch.' + key[10:]] = v
elif os.path.basename(pretrained_audio).startswith(
'finetuned'): # checkpoint trained via linear probe codebase
audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
else:
raise ValueError('Unknown audio checkpoint')
elif amodel_type.startswith('HTSAT'):
if 'HTSAT_AudioSet_Saved' in pretrained_audio: # official checkpoint
audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
audio_ckpt = audio_ckpt['state_dict']
keys = list(audio_ckpt.keys())
for key in keys:
if key.startswith('sed_model') and ('spectrogram_extractor' not in key
and 'logmel_extractor' not in key):
v = audio_ckpt.pop(key)
audio_ckpt['audio_branch.' + key[10:]] = v
# checkpoint trained via HTSAT codebase
elif os.path.basename(pretrained_audio).startswith('HTSAT'):
audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
audio_ckpt = audio_ckpt['state_dict']
keys = list(audio_ckpt.keys())
for key in keys:
if key.startswith('sed_model'):
v = audio_ckpt.pop(key)
audio_ckpt['audio_branch.' + key[10:]] = v
elif os.path.basename(pretrained_audio).startswith(
'finetuned'): # checkpoint trained via linear probe codebase
audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
else:
raise ValueError('Unknown audio checkpoint')
else:
raise f'this audio encoder pretrained checkpoint is not support'
model.load_state_dict(audio_ckpt, strict=False)
logging.info(f"Loading pretrained {amodel_type} weights ({pretrained_audio}).")
param_names = [n for n, p in model.named_parameters()]
for n in param_names:
print(n, "\t", "Loaded" if n in audio_ckpt else "Unloaded")
model.to(device=device)
if precision == "fp16":
assert device.type != "cpu"
|
_MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"]
_MODEL_CONFIGS = {} # directory (model_name: config) of model architecture configs
def _natural_key(string_):
return [int(s) if s.isdigit() else s for s in re.split(r"(\d+)", string_.lower())]
def _rescan_model_configs():
"""Rescan model config directory for new configs.
"""
global _MODEL_CONFIGS
config_ext = (".json",)
config_files = []
for config_path in _MODEL_CONFIG_PATHS:
if config_path.is_file() and config_path.suffix in config_ext:
config_files.append(config_path)
elif config_path.is_dir():
for ext in config_ext:
config_files.extend(config_path.glob(f"*{ext}"))
for cf in config_files:
with open(cf, "r") as f:
model_cfg = json.load(f)
if all(a in model_cfg for a in ("joint_embed_shape", "audio_cfg", "text_cfg")):
_MODEL_CONFIGS[cf.stem] = model_cfg
_MODEL_CONFIGS = {
k: v
for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))
}
_rescan_model_configs() # initial populate of model config registry
def load_state_dict(checkpoint_path: str, map_location="cpu", skip_params=True):
"""Load a checkpoint from a file.
Args:
checkpoint_path (str): checkpoint path
map_location (str, optional): a function, :class:`torch.device`, string or a dict specifying how to
remap storage locations. Defaults to "cpu".
skip_params (bool, optional): Remove the module from the key field. Defaults to True.
Returns:
state_dict (dict): model state dict
"""
checkpoint = torch.load(checkpoint_path, map_location=map_location)
if isinstance(checkpoint, dict) and "state_dict" in checkpoint:
state_dict = checkpoint["state_dict"]
else:
state_dict = checkpoint
if skip_params:
if next(iter(state_dict.items()))[0].startswith("module"):
state_dict = {k[7:]: v for k, v in state_dict.items()}
return state_dict
def create_model(
args,
model_name: str,
precision: str = "fp32",
device: torch.device = torch.device("cpu"),
jit: bool = False,
):
"""Create a CLAP model from a model config.
Args:
args (argparse.Namespace): Command-line arguments.
model_name (str): model name
precision (str, optional): Model parameter accuracy. Defaults to "fp32".
device (torch.device, optional): device. Defaults to torch.device("cpu").
jit (bool, optional): torch.jit.script operations. Defaults to False.
Returns:
model (nn.Module): CLAP model
model_cfg (dict): model config
"""
if model_name in _MODEL_CONFIGS:
logging.info(f"Loading {model_name} model config.")
model_cfg = deepcopy(_MODEL_CONFIGS[model_name])
else:
logging.error(
f"Model config for {model_name} not found; available models {list_models()}."
)
raise RuntimeError(f"Model config for {model_name} not found.")
model = CLAP(args, **model_cfg)
# load pretrained CLAP model
if args.pretrained:
pretrained_clap = torch.load(args.pretrained, map_location='cpu')
model.load_state_dict(pretrained_clap["state_dict"], strict=False)
logging.info(f"Loaded pretrained CLAP model weights !!!")
else:
# load pretrained audio encoder
pretrained_audio = model_cfg["audio_cfg"]["pretrained_audio"]
amodel_type = model_cfg["audio_cfg"]["model_type"]
if pretrained_audio:
if amodel_type.startswith('PANN'):
if 'Cnn14_mAP' in pretrained_audio: # official checkpoint
audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
audio_ckpt = audio_ckpt['model']
keys = list(audio_ckpt.keys())
for key in keys:
if 'spectrogram_extractor' not in key and 'logmel_extractor' not in key:
v = audio_ckpt.pop(key)
audio_ckpt['audio_branch.' + key] = v
# checkpoint trained via HTSAT codebase
elif os.path.basename(pretrained_audio).startswith('PANN'):
audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
audio_ckpt = audio_ckpt['state_dict']
keys = list(audio_ckpt.keys())
for key in keys:
if key.startswith('sed_model'):
v = audio_ckpt.pop(key)
audio_ckpt['audio_branch.' + key[10:]] = v
elif os.path.basename(pretrained_audio).startswith(
'finetuned'): # checkpoint trained via linear probe codebase
audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
else:
raise ValueError('Unknown audio checkpoint')
elif amodel_type.startswith('HTSAT'):
if 'HTSAT_AudioSet_Saved' in pretrained_audio: # official checkpoint
audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
audio_ckpt = audio_ckpt['state_dict']
keys = list(audio_ckpt.keys())
for key in keys:
if key.startswith('sed_model') and ('spectrogram_extractor' not in key
and 'logmel_extractor' not in key):
v = audio_ckpt.pop(key)
audio_ckpt['audio_branch.' + key[10:]] = v
# checkpoint trained via HTSAT codebase
elif os.path.basename(pretrained_audio).startswith('HTSAT'):
audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
audio_ckpt = audio_ckpt['state_dict']
keys = list(audio_ckpt.keys())
for key in keys:
if key.startswith('sed_model'):
v = audio_ckpt.pop(key)
audio_ckpt['audio_branch.' + key[10:]] = v
elif os.path.basename(pretrained_audio).startswith(
'finetuned'): # checkpoint trained via linear probe codebase
audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
else:
raise ValueError('Unknown audio checkpoint')
else:
raise f'this audio encoder pretrained checkpoint is not support'
model.load_state_dict(audio_ckpt, strict=False)
logging.info(f"Loading pretrained {amodel_type} weights ({pretrained_audio}).")
param_names = [n for n, p in model.named_parameters()]
for n in param_names:
print(n, "\t", "Loaded" if n in audio_ckpt else "Unloaded")
model.to(device=device)
if precision == "fp16":
assert device.type != "cpu"
| convert_weights_to_fp16(model)
| 1 | 2023-11-25 02:38:32+00:00 | 12k |
ubc-vision/StableKeypoints | unsupervised_keypoints/main.py | [
{
"identifier": "load_ldm",
"path": "unsupervised_keypoints/optimize_token.py",
"snippet": "def load_ldm(device, type=\"CompVis/stable-diffusion-v1-4\", feature_upsample_res=256):\n scheduler = DDIMScheduler(\n beta_start=0.00085,\n beta_end=0.012,\n beta_schedule=\"scaled_linear... | import os
import wandb
import numpy as np
import argparse
import torch
import numpy as np
from unsupervised_keypoints.optimize_token import load_ldm
from unsupervised_keypoints.optimize import optimize_embedding
from unsupervised_keypoints.keypoint_regressor import (
find_best_indices,
precompute_all_keypoints,
return_regressor,
return_regressor_visible,
return_regressor_human36m,
)
from unsupervised_keypoints.eval import evaluate
from unsupervised_keypoints.visualize import visualize_attn_maps | 9,555 | )
parser.add_argument(
"--max_loc_strategy",
type=str,
default="argmax",
choices=["argmax", "weighted_avg"],
help="strategy for choosing max location in the attention map",
)
parser.add_argument(
"--evaluation_method",
type=str,
default="inter_eye_distance",
choices=["inter_eye_distance", "visible", "mean_average_error", "pck", "orientation_invariant"],
help="strategy for evaluation",
)
parser.add_argument(
"--min_dist",
type=float,
default=0.1,
help="minimum distance between the keypoints, as a fraction of the image size",
)
parser.add_argument(
"--furthest_point_num_samples",
type=int,
default=25,
help="the number of samples to use if using the furthest point strategy",
)
parser.add_argument(
"--num_indices",
type=int,
default=100,
help="the number of samples to use for finding the indices of the best tokens",
)
parser.add_argument(
"--num_subjects",
type=int,
default=1,
help="the number of subjects within each image",
)
parser.add_argument(
"--sharpening_loss_weight",
type=float,
default=100,
help="Weight of the sharpening loss",
)
parser.add_argument(
"--equivariance_attn_loss_weight",
type=float,
default=1000.0,
help="Weight of the old equivariance loss",
)
parser.add_argument("--layers", type=int, nargs="+", default=[0, 1, 2, 3])
parser.add_argument(
"--noise_level",
type=int,
default=-1,
help="noise level for the test set between 0 and 49 where 0 is the highest noise level and 49 is the lowest noise level",
)
parser.add_argument(
"--max_num_points",
type=int,
default=50_000,
help="number of samples to precompute",
)
parser.add_argument(
"--sigma", type=float, default=2.0, help="sigma for the gaussian kernel"
)
parser.add_argument(
"--augment_degrees",
type=float,
default=15.0,
help="rotation degrees for augmentation",
)
parser.add_argument(
"--augment_scale",
type=float,
# 2 arguments
nargs="+",
default=[0.8, 1.0],
help="scale factor for augmentation",
)
parser.add_argument(
"--augment_translate",
type=float,
nargs="+",
default=[0.25, 0.25],
help="amount of translation for augmentation along x and y axis",
)
parser.add_argument(
"--augmentation_iterations",
type=int,
default=10,
help="number of iterations for augmentation",
)
# store true the boolean argument 'visualize'
parser.add_argument(
"--visualize", action="store_true", help="visualize the attention maps"
)
parser.add_argument(
"--validation", action="store_true", help="use the validation sets instead of the training/testing set"
)
parser.add_argument("--top_k", type=int, default=10, help="number of points to choose")
args = parser.parse_args()
ldm, controllers, num_gpus = load_ldm(args.device, args.model_type, feature_upsample_res=args.feature_upsample_res)
# if args.save_folder doesnt exist create it
if not os.path.exists(args.save_folder):
os.makedirs(args.save_folder)
# print number of gpus
print("Number of GPUs: ", torch.cuda.device_count())
if args.wandb:
# start a wandb session
wandb.init(project="attention_maps", name=args.wandb_name, config=vars(args))
if args.start_from_stage == "optimize":
|
# Argument parsing
parser = argparse.ArgumentParser(description="optimize a class embedding")
# Network details
parser.add_argument(
"--model_type",
type=str,
default="runwayml/stable-diffusion-v1-5",
help="ldm model type",
)
# Dataset details
parser.add_argument(
"--dataset_loc",
type=str,
default="~",
help="Path to dataset",
)
parser.add_argument(
"--save_folder",
type=str,
default="outputs",
help="Where to save visualizations and checkpoints",
)
parser.add_argument(
"--wandb_name",
type=str,
default="temp",
help="name of the wandb run",
)
parser.add_argument(
"--dataset_name",
# set the choices to be "mafl" and "celeba_aligned"
choices=["celeba_aligned", "celeba_wild", "cub_aligned", "cub_001", "cub_002", "cub_003", "cub_all", "deepfashion", "taichi", "human3.6m", "unaligned_human3.6m", "custom"],
type=str,
default="celeba_aligned",
help="name of the dataset to use",
)
parser.add_argument(
"--max_len",
type=int,
default=-1,
help="max length of the dataset. -1 means no max length",
)
parser.add_argument(
"--start_from_stage",
choices=["optimize", "find_indices", "precompute", "evaluate"],
type=str,
default="optimize",
help="Specify the stage from which the process should start."
)
parser.add_argument("--device", type=str, default="cuda:0", help="device to use")
parser.add_argument("--wandb", action="store_true", help="wandb logging")
parser.add_argument("--lr", type=float, default=5e-3, help="learning rate")
parser.add_argument(
"--num_steps", type=int, default=500, help="number of steps to optimize for"
)
parser.add_argument(
"--num_tokens", type=int, default=500, help="number of tokens to optimize"
)
parser.add_argument(
"--feature_upsample_res", type=int, default=128, help="upsampled resolution for latent features grabbed from the attn operation"
)
parser.add_argument(
"--batch_size", type=int, default=4, help="size of the batch for optimization"
)
parser.add_argument(
"--top_k_strategy",
type=str,
default="gaussian",
choices=["entropy", "gaussian", "consistent"],
help="strategy for choosing top k tokens",
)
parser.add_argument(
"--max_loc_strategy",
type=str,
default="argmax",
choices=["argmax", "weighted_avg"],
help="strategy for choosing max location in the attention map",
)
parser.add_argument(
"--evaluation_method",
type=str,
default="inter_eye_distance",
choices=["inter_eye_distance", "visible", "mean_average_error", "pck", "orientation_invariant"],
help="strategy for evaluation",
)
parser.add_argument(
"--min_dist",
type=float,
default=0.1,
help="minimum distance between the keypoints, as a fraction of the image size",
)
parser.add_argument(
"--furthest_point_num_samples",
type=int,
default=25,
help="the number of samples to use if using the furthest point strategy",
)
parser.add_argument(
"--num_indices",
type=int,
default=100,
help="the number of samples to use for finding the indices of the best tokens",
)
parser.add_argument(
"--num_subjects",
type=int,
default=1,
help="the number of subjects within each image",
)
parser.add_argument(
"--sharpening_loss_weight",
type=float,
default=100,
help="Weight of the sharpening loss",
)
parser.add_argument(
"--equivariance_attn_loss_weight",
type=float,
default=1000.0,
help="Weight of the old equivariance loss",
)
parser.add_argument("--layers", type=int, nargs="+", default=[0, 1, 2, 3])
parser.add_argument(
"--noise_level",
type=int,
default=-1,
help="noise level for the test set between 0 and 49 where 0 is the highest noise level and 49 is the lowest noise level",
)
parser.add_argument(
"--max_num_points",
type=int,
default=50_000,
help="number of samples to precompute",
)
parser.add_argument(
"--sigma", type=float, default=2.0, help="sigma for the gaussian kernel"
)
parser.add_argument(
"--augment_degrees",
type=float,
default=15.0,
help="rotation degrees for augmentation",
)
parser.add_argument(
"--augment_scale",
type=float,
# 2 arguments
nargs="+",
default=[0.8, 1.0],
help="scale factor for augmentation",
)
parser.add_argument(
"--augment_translate",
type=float,
nargs="+",
default=[0.25, 0.25],
help="amount of translation for augmentation along x and y axis",
)
parser.add_argument(
"--augmentation_iterations",
type=int,
default=10,
help="number of iterations for augmentation",
)
# store true the boolean argument 'visualize'
parser.add_argument(
"--visualize", action="store_true", help="visualize the attention maps"
)
parser.add_argument(
"--validation", action="store_true", help="use the validation sets instead of the training/testing set"
)
parser.add_argument("--top_k", type=int, default=10, help="number of points to choose")
args = parser.parse_args()
ldm, controllers, num_gpus = load_ldm(args.device, args.model_type, feature_upsample_res=args.feature_upsample_res)
# if args.save_folder doesnt exist create it
if not os.path.exists(args.save_folder):
os.makedirs(args.save_folder)
# print number of gpus
print("Number of GPUs: ", torch.cuda.device_count())
if args.wandb:
# start a wandb session
wandb.init(project="attention_maps", name=args.wandb_name, config=vars(args))
if args.start_from_stage == "optimize": | embedding = optimize_embedding( | 1 | 2023-11-23 00:04:17+00:00 | 12k |
BigRoy/usd-qtpy | usd_qtpy/prim_hierarchy.py | [
{
"identifier": "get_prim_types_by_group",
"path": "usd_qtpy/lib/usd.py",
"snippet": "def get_prim_types_by_group() -> dict:\n \"\"\"Return all registered concrete type names by nice plug-in grouping.\n\n Returns:\n dict: Schema type names grouped by plug-in name.\n\n \"\"\"\n\n plug_... | import logging
from functools import partial
from qtpy import QtWidgets, QtCore
from pxr import Sdf
from .lib.usd import (
get_prim_types_by_group,
parent_prims,
remove_spec,
unique_name,
)
from .lib.usd_merge_spec import copy_spec_merge
from .lib.qt import iter_model_rows
from .prim_delegate import DrawRectsDelegate
from .prim_hierarchy_model import HierarchyModel
from .references import ReferenceListWidget
from .variants import CreateVariantSetDialog, VariantSetsWidget | 8,495 | type_name = action.text()
# Ensure unique name
prim_path = parent_path.AppendChild(type_name)
prim_path = unique_name(stage, prim_path)
if type_name == "Def":
# Typeless
type_name = ""
# Define prim and signal change to the model
# TODO: Remove signaling once model listens to changes
current_rows = model.rowCount(index)
model.beginInsertRows(index, current_rows, current_rows+1)
new_prim = stage.DefinePrim(prim_path, type_name)
self.select_paths([new_prim.GetPath()])
model.endInsertRows()
# Create Prims
create_prim_menu = menu.addMenu("Create Prim")
create_prim_menu.addAction("Def")
create_prim_menu.addAction("Scope")
create_prim_menu.addAction("Xform")
create_prim_menu.addSeparator()
create_prim_menu.addAction("Cone")
create_prim_menu.addAction("Cube")
create_prim_menu.addAction("Cylinder")
create_prim_menu.addAction("Sphere")
create_prim_menu.addSeparator()
create_prim_menu.addAction("DistantLight")
create_prim_menu.addAction("DomeLight")
create_prim_menu.addAction("RectLight")
create_prim_menu.addAction("SphereLight")
create_prim_menu.addSeparator()
create_prim_menu.addAction("Camera")
create_prim_menu.addSeparator()
# TODO: Cache this submenu?
types_by_group = get_prim_types_by_group()
all_registered_menu = create_prim_menu.addMenu("All Registered")
for group, types in types_by_group.items():
group_menu = all_registered_menu.addMenu(group)
for type_name in types:
group_menu.addAction(type_name)
create_prim_menu.triggered.connect(create_prim)
# Set and clear default prim
if parent_path.IsRootPrimPath():
# This prim is a primitive directly under root so can be an
# active prim
if parent == stage.GetDefaultPrim():
label = "Clear default prim"
action = menu.addAction(label)
tip = (
"Clear the default prim from the stage's root layer.\n"
)
action.setToolTip(tip)
action.setStatusTip(tip)
action.triggered.connect(partial(stage.ClearDefaultPrim))
else:
label = "Set as default prim"
action = menu.addAction(label)
tip = "Set prim as default prim on the stage's root layer."
action.setToolTip(tip)
action.setStatusTip(tip)
action.triggered.connect(partial(stage.SetDefaultPrim, parent))
# Allow referencing / payloads / variants management
if not parent_path.IsAbsoluteRootPath():
action = menu.addAction("Add reference/payload..")
action.triggered.connect(partial(
self.on_manage_prim_reference_payload, parent)
)
def _add_variant_set(prim):
# TODO: maybe directly allow managing the individual variants
# from the same UI; and allow setting the default variant
# Prompt for a variant set name
name = CreateVariantSetDialog.get_variant_set_name(parent=self)
if name is not None:
# Create the variant set, even allowing to create it
# without populating a variant name
prim.GetVariantSets().AddVariantSet(name)
action = menu.addAction("Create Variant Set")
action.triggered.connect(partial(_add_variant_set, parent))
# Get mouse position
global_pos = self.viewport().mapToGlobal(point)
menu.exec_(global_pos)
def on_manage_prim_reference_payload(self, prim):
widget = ReferenceListWidget(prim=prim, parent=self)
widget.resize(800, 300)
widget.show()
def on_prim_tag_clicked(self, event, index, block):
text = block.get("text")
if text == "DFT":
# Allow to clear the prim from a menu
model = self.model()
stage = model.stage
menu = QtWidgets.QMenu(parent=self)
action = menu.addAction("Clear default prim")
tip = (
"Clear the default prim from the stage's root layer.\n"
)
action.setToolTip(tip)
action.setStatusTip(tip)
action.triggered.connect(partial(stage.ClearDefaultPrim))
point = event.position().toPoint()
menu.exec_(self.mapToGlobal(point))
elif text == "REF":
prim = index.data(HierarchyModel.PrimRole)
self.on_manage_prim_reference_payload(prim)
elif text == "VAR":
prim = index.data(HierarchyModel.PrimRole)
|
log = logging.getLogger(__name__)
class View(QtWidgets.QTreeView):
# TODO: Add shortcuts
# CTRL + D: Duplicate
# CTRL + G: Group (add Xform above current selection)
# Delete or backspace: Remove the selected prims
def __init__(self, *args, **kwargs):
super(View, self).__init__(*args, **kwargs)
self.setHeaderHidden(True)
self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.on_context_menu)
self._delegate = DrawRectsDelegate(parent=self)
self.setItemDelegateForColumn(0, self._delegate)
self._delegate.rect_clicked.connect(self.on_prim_tag_clicked)
def on_context_menu(self, point):
index = self.indexAt(point)
model = self.model()
stage = model.stage
parent = index.data(HierarchyModel.PrimRole)
if not parent:
parent = stage.GetPseudoRoot()
parent_path = parent.GetPath()
menu = QtWidgets.QMenu(self)
def create_prim(action):
type_name = action.text()
# Ensure unique name
prim_path = parent_path.AppendChild(type_name)
prim_path = unique_name(stage, prim_path)
if type_name == "Def":
# Typeless
type_name = ""
# Define prim and signal change to the model
# TODO: Remove signaling once model listens to changes
current_rows = model.rowCount(index)
model.beginInsertRows(index, current_rows, current_rows+1)
new_prim = stage.DefinePrim(prim_path, type_name)
self.select_paths([new_prim.GetPath()])
model.endInsertRows()
# Create Prims
create_prim_menu = menu.addMenu("Create Prim")
create_prim_menu.addAction("Def")
create_prim_menu.addAction("Scope")
create_prim_menu.addAction("Xform")
create_prim_menu.addSeparator()
create_prim_menu.addAction("Cone")
create_prim_menu.addAction("Cube")
create_prim_menu.addAction("Cylinder")
create_prim_menu.addAction("Sphere")
create_prim_menu.addSeparator()
create_prim_menu.addAction("DistantLight")
create_prim_menu.addAction("DomeLight")
create_prim_menu.addAction("RectLight")
create_prim_menu.addAction("SphereLight")
create_prim_menu.addSeparator()
create_prim_menu.addAction("Camera")
create_prim_menu.addSeparator()
# TODO: Cache this submenu?
types_by_group = get_prim_types_by_group()
all_registered_menu = create_prim_menu.addMenu("All Registered")
for group, types in types_by_group.items():
group_menu = all_registered_menu.addMenu(group)
for type_name in types:
group_menu.addAction(type_name)
create_prim_menu.triggered.connect(create_prim)
# Set and clear default prim
if parent_path.IsRootPrimPath():
# This prim is a primitive directly under root so can be an
# active prim
if parent == stage.GetDefaultPrim():
label = "Clear default prim"
action = menu.addAction(label)
tip = (
"Clear the default prim from the stage's root layer.\n"
)
action.setToolTip(tip)
action.setStatusTip(tip)
action.triggered.connect(partial(stage.ClearDefaultPrim))
else:
label = "Set as default prim"
action = menu.addAction(label)
tip = "Set prim as default prim on the stage's root layer."
action.setToolTip(tip)
action.setStatusTip(tip)
action.triggered.connect(partial(stage.SetDefaultPrim, parent))
# Allow referencing / payloads / variants management
if not parent_path.IsAbsoluteRootPath():
action = menu.addAction("Add reference/payload..")
action.triggered.connect(partial(
self.on_manage_prim_reference_payload, parent)
)
def _add_variant_set(prim):
# TODO: maybe directly allow managing the individual variants
# from the same UI; and allow setting the default variant
# Prompt for a variant set name
name = CreateVariantSetDialog.get_variant_set_name(parent=self)
if name is not None:
# Create the variant set, even allowing to create it
# without populating a variant name
prim.GetVariantSets().AddVariantSet(name)
action = menu.addAction("Create Variant Set")
action.triggered.connect(partial(_add_variant_set, parent))
# Get mouse position
global_pos = self.viewport().mapToGlobal(point)
menu.exec_(global_pos)
def on_manage_prim_reference_payload(self, prim):
widget = ReferenceListWidget(prim=prim, parent=self)
widget.resize(800, 300)
widget.show()
def on_prim_tag_clicked(self, event, index, block):
text = block.get("text")
if text == "DFT":
# Allow to clear the prim from a menu
model = self.model()
stage = model.stage
menu = QtWidgets.QMenu(parent=self)
action = menu.addAction("Clear default prim")
tip = (
"Clear the default prim from the stage's root layer.\n"
)
action.setToolTip(tip)
action.setStatusTip(tip)
action.triggered.connect(partial(stage.ClearDefaultPrim))
point = event.position().toPoint()
menu.exec_(self.mapToGlobal(point))
elif text == "REF":
prim = index.data(HierarchyModel.PrimRole)
self.on_manage_prim_reference_payload(prim)
elif text == "VAR":
prim = index.data(HierarchyModel.PrimRole) | widget = VariantSetsWidget(prim=prim, parent=self) | 10 | 2023-11-22 15:56:35+00:00 | 12k |
jefferyZhan/Griffon | llava/model/language_model/mpt/modeling_mpt.py | [
{
"identifier": "attn_bias_shape",
"path": "llava/model/language_model/mpt/attention.py",
"snippet": "def attn_bias_shape(attn_impl, n_heads, seq_len, alibi, prefix_lm, causal, use_sequence_id):\n if attn_impl == 'flash':\n return None\n elif attn_impl in ['torch', 'triton']:\n if al... | import math
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List, Optional, Tuple, Union
from transformers import PreTrainedModel, PreTrainedTokenizer, PreTrainedTokenizerFast
from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from .attention import attn_bias_shape, build_attn_bias
from .blocks import MPTBlock
from .custom_embedding import SharedEmbedding
from .norm import NORM_CLASS_REGISTRY
from .configuration_mpt import MPTConfig
from .adapt_tokenizer import AutoTokenizerForMOD, adapt_tokenizer_for_denoising
from .hf_prefixlm_converter import add_bidirectional_mask_if_missing, convert_hf_causal_lm_to_prefix_lm
from .meta_init_context import init_empty_weights
from .param_init_fns import MODEL_INIT_REGISTRY, generic_param_init_fn_
from .flash_attn_triton import flash_attn_func | 7,343 | """A simple, flexible implementation of a GPT model.
Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
try:
except:
pass
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
class MPTPreTrainedModel(PreTrainedModel):
config_class = MPTConfig
base_model_prefix = 'model'
_no_split_modules = ['MPTBlock']
class MPTModel(MPTPreTrainedModel):
def __init__(self, config: MPTConfig):
config._validate_config()
super().__init__(config)
self.attn_impl = config.attn_config['attn_impl']
self.prefix_lm = config.attn_config['prefix_lm']
self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
self.alibi = config.attn_config['alibi']
self.alibi_bias_max = config.attn_config['alibi_bias_max']
if config.init_device == 'mixed':
if dist.get_local_rank() == 0:
config.init_device = 'cpu'
else:
config.init_device = 'meta'
if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys())
raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).')
norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
self.embedding_fraction = config.embedding_fraction
self.wte = SharedEmbedding(config.vocab_size, config.d_model, device=config.init_device)
if not self.alibi:
self.wpe = torch.nn.Embedding(config.max_seq_len, config.d_model, device=config.init_device)
self.emb_drop = nn.Dropout(config.emb_pdrop)
self.blocks = nn.ModuleList([MPTBlock(device=config.init_device, **config.to_dict()) for _ in range(config.n_layers)])
self.norm_f = norm_class(config.d_model, device=config.init_device)
if config.init_device != 'meta':
print(f'You are using config.init_device={config.init_device!r}, but you can also use config.init_device="meta" with Composer + FSDP for fast initialization.')
self.apply(self.param_init_fn)
self.is_causal = not self.prefix_lm
self._attn_bias_initialized = False
self.attn_bias = None
self.attn_bias_shape = attn_bias_shape(self.attn_impl, config.n_heads, config.max_seq_len, self.alibi, prefix_lm=self.prefix_lm, causal=self.is_causal, use_sequence_id=self.attn_uses_sequence_id)
if config.no_bias:
for module in self.modules():
if hasattr(module, 'bias') and isinstance(module.bias, nn.Parameter):
if config.verbose:
warnings.warn(f'Removing bias ({module.bias}) from {module}.')
module.register_parameter('bias', None)
if config.verbose and config.verbose > 2:
print(self)
if 'verbose' not in self.config.init_config:
self.config.init_config['verbose'] = self.config.verbose
if self.config.init_config['verbose'] > 1:
init_fn_name = self.config.init_config['name']
warnings.warn(f'Using {init_fn_name} initialization.')
self.gradient_checkpointing = False
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, value):
self.wte = value
@torch.no_grad()
def _attn_bias(self, device, dtype, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None):
if not self._attn_bias_initialized:
if self.attn_bias_shape:
self.attn_bias = torch.zeros(self.attn_bias_shape, device=device, dtype=dtype)
| """A simple, flexible implementation of a GPT model.
Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py
"""
try:
except:
pass
Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
class MPTPreTrainedModel(PreTrainedModel):
config_class = MPTConfig
base_model_prefix = 'model'
_no_split_modules = ['MPTBlock']
class MPTModel(MPTPreTrainedModel):
def __init__(self, config: MPTConfig):
config._validate_config()
super().__init__(config)
self.attn_impl = config.attn_config['attn_impl']
self.prefix_lm = config.attn_config['prefix_lm']
self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id']
self.alibi = config.attn_config['alibi']
self.alibi_bias_max = config.attn_config['alibi_bias_max']
if config.init_device == 'mixed':
if dist.get_local_rank() == 0:
config.init_device = 'cpu'
else:
config.init_device = 'meta'
if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys():
norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys())
raise NotImplementedError(f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).')
norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()]
self.embedding_fraction = config.embedding_fraction
self.wte = SharedEmbedding(config.vocab_size, config.d_model, device=config.init_device)
if not self.alibi:
self.wpe = torch.nn.Embedding(config.max_seq_len, config.d_model, device=config.init_device)
self.emb_drop = nn.Dropout(config.emb_pdrop)
self.blocks = nn.ModuleList([MPTBlock(device=config.init_device, **config.to_dict()) for _ in range(config.n_layers)])
self.norm_f = norm_class(config.d_model, device=config.init_device)
if config.init_device != 'meta':
print(f'You are using config.init_device={config.init_device!r}, but you can also use config.init_device="meta" with Composer + FSDP for fast initialization.')
self.apply(self.param_init_fn)
self.is_causal = not self.prefix_lm
self._attn_bias_initialized = False
self.attn_bias = None
self.attn_bias_shape = attn_bias_shape(self.attn_impl, config.n_heads, config.max_seq_len, self.alibi, prefix_lm=self.prefix_lm, causal=self.is_causal, use_sequence_id=self.attn_uses_sequence_id)
if config.no_bias:
for module in self.modules():
if hasattr(module, 'bias') and isinstance(module.bias, nn.Parameter):
if config.verbose:
warnings.warn(f'Removing bias ({module.bias}) from {module}.')
module.register_parameter('bias', None)
if config.verbose and config.verbose > 2:
print(self)
if 'verbose' not in self.config.init_config:
self.config.init_config['verbose'] = self.config.verbose
if self.config.init_config['verbose'] > 1:
init_fn_name = self.config.init_config['name']
warnings.warn(f'Using {init_fn_name} initialization.')
self.gradient_checkpointing = False
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, value):
self.wte = value
@torch.no_grad()
def _attn_bias(self, device, dtype, attention_mask: Optional[torch.ByteTensor]=None, prefix_mask: Optional[torch.ByteTensor]=None, sequence_id: Optional[torch.LongTensor]=None):
if not self._attn_bias_initialized:
if self.attn_bias_shape:
self.attn_bias = torch.zeros(self.attn_bias_shape, device=device, dtype=dtype) | self.attn_bias = build_attn_bias(self.attn_impl, self.attn_bias, self.config.n_heads, self.config.max_seq_len, causal=self.is_causal, alibi=self.alibi, alibi_bias_max=self.alibi_bias_max) | 1 | 2023-11-22 03:18:57+00:00 | 12k |
josejuanmartinez/mindcraft | mindcraft/mind/npc.py | [
{
"identifier": "SummarizerTypes",
"path": "mindcraft/memory/summarizer_types.py",
"snippet": "class SummarizerTypes(Enum):\n T5_SMALL = \"Falconsai/text_summarization\""
},
{
"identifier": "STM",
"path": "mindcraft/memory/stm.py",
"snippet": "class STM:\n def __init__(self,\n ... | from mindcraft.memory.summarizer_types import SummarizerTypes
from mindcraft.memory.stm import STM
from mindcraft.infra.vectorstore.stores_types import StoresTypes
from mindcraft.infra.sft.feedback import Feedback
from mindcraft.features.motivation import Motivation
from mindcraft.features.personality import Personality
from mindcraft.lore.world import World
from mindcraft.infra.embeddings.embeddings_types import EmbeddingsTypes
from mindcraft.memory.ltm import LTM
from mindcraft.settings import LOGGER_FORMAT, DATE_FORMAT
from mindcraft.features.mood import Mood
from mindcraft.styles.conversational_style import ConversationalStyle
import logging | 7,476 |
logging.basicConfig(format=LOGGER_FORMAT, datefmt=DATE_FORMAT, level=logging.INFO)
logger = logging.getLogger(__name__)
class NPC:
def __init__(self,
character_name: str,
description: str,
personalities: list[Personality],
motivations: list[Motivation],
mood: Mood,
store_type: StoresTypes,
ltm_embeddings: EmbeddingsTypes = EmbeddingsTypes.MINILM,
stm_capacity: int = 5,
stm_summarizer: SummarizerTypes = SummarizerTypes.T5_SMALL,
stm_max_summary_length: int = 230,
stm_min_summary_length: int = 30):
"""
A class managing the Non-player Character, including short-term, long-term memory, backgrounds, motivations
to create the answer.
:param character_name: the unique id of the character
:param description: a short description of who your character in the world is
:param personalities: a list of personalities that permanently define the character
(if it's a current state then use it in `moods`)
:param motivations: a list of motivations the character has
:param mood: current mood of the character. They can change over the time.
:param store_type: VectorStore from StoresTypes you prefer to use.
:param ltm_embeddings: embeddings from EmbeddingsTypes you prefer to use
:param stm_capacity: How many interactions from ltm to store
:param stm_summarizer: One of `SummarizerTypes` to use for including the summary of last interactions
:param stm_max_summary_length: max length of the summary
:param stm_min_summary_length: min length of the summary
"""
self._character_name = character_name
self._description = description
self._ltm = LTM(store_type, character_name, ltm_embeddings)
self._stm = STM(self._ltm, stm_capacity, stm_summarizer, stm_max_summary_length, stm_min_summary_length)
self._personalities = personalities
self._motivations = motivations
self._mood = mood
|
logging.basicConfig(format=LOGGER_FORMAT, datefmt=DATE_FORMAT, level=logging.INFO)
logger = logging.getLogger(__name__)
class NPC:
def __init__(self,
character_name: str,
description: str,
personalities: list[Personality],
motivations: list[Motivation],
mood: Mood,
store_type: StoresTypes,
ltm_embeddings: EmbeddingsTypes = EmbeddingsTypes.MINILM,
stm_capacity: int = 5,
stm_summarizer: SummarizerTypes = SummarizerTypes.T5_SMALL,
stm_max_summary_length: int = 230,
stm_min_summary_length: int = 30):
"""
A class managing the Non-player Character, including short-term, long-term memory, backgrounds, motivations
to create the answer.
:param character_name: the unique id of the character
:param description: a short description of who your character in the world is
:param personalities: a list of personalities that permanently define the character
(if it's a current state then use it in `moods`)
:param motivations: a list of motivations the character has
:param mood: current mood of the character. They can change over the time.
:param store_type: VectorStore from StoresTypes you prefer to use.
:param ltm_embeddings: embeddings from EmbeddingsTypes you prefer to use
:param stm_capacity: How many interactions from ltm to store
:param stm_summarizer: One of `SummarizerTypes` to use for including the summary of last interactions
:param stm_max_summary_length: max length of the summary
:param stm_min_summary_length: min length of the summary
"""
self._character_name = character_name
self._description = description
self._ltm = LTM(store_type, character_name, ltm_embeddings)
self._stm = STM(self._ltm, stm_capacity, stm_summarizer, stm_max_summary_length, stm_min_summary_length)
self._personalities = personalities
self._motivations = motivations
self._mood = mood | self._conversational_style = ConversationalStyle(store_type, character_name, ltm_embeddings) | 12 | 2023-11-24 19:23:37+00:00 | 12k |
Algomancer/The-Daily-Train | eval/lm_eval_harness.py | [
{
"identifier": "generate",
"path": "generate/base.py",
"snippet": "@torch.inference_mode()\ndef generate(\n model: GPT,\n prompt: torch.Tensor,\n max_returned_tokens: int,\n *,\n temperature: float = 1.0,\n top_k: Optional[int] = None,\n eos_id: Optional[int] = None,\n) -> torch.Te... | import json
import sys
import lightning as L
import torch
import fnmatch
from pathlib import Path
from typing import Dict, List, Literal, Optional
from lightning.fabric.plugins import BitsandbytesPrecision
from lm_eval import base, evaluator, tasks
from lm_eval.base import BaseLM
from generate.base import generate
from daily_train import GPT, Config, Tokenizer
from daily_train.utils import (
check_valid_checkpoint_dir,
get_default_supported_precision,
gptq_quantization,
load_checkpoint,
)
from jsonargparse import CLI | 7,220 | return self.batch_size_per_gpu * self.fabric.world_size
@property
def device(self):
return self.fabric.device
def tok_encode(self, string: str) -> List[int]:
return self.tokenizer.encode(string, bos=False, eos=False).tolist()
def tok_decode(self, tokens: List[int]) -> str:
t = torch.tensor(tokens)
return self.tokenizer.decode(t)
@torch.inference_mode()
def _model_call(self, inps):
return self.model(inps)
def _model_generate(self, context, max_length, eos_token_id) -> torch.Tensor:
# this only supports batch size 1
assert context.shape[0] == 1
out = generate(self.model, context[0], max_length, eos_id=eos_token_id)
for block in self.model.transformer.h:
block.attn.kv_cache.reset_parameters()
return out.unsqueeze(0)
@torch.inference_mode()
def run_eval(
self, eval_tasks: List[str], num_fewshot: int, limit: Optional[int], bootstrap_iters: int, no_cache: bool
) -> Dict:
# Returns a list containing all values of the task registry that
# match at least one of the patterns
def pattern_match(patterns, source_list):
task_names = set()
for pattern in patterns:
for matching in fnmatch.filter(source_list, pattern):
task_names.add(matching)
return list(task_names)
eval_tasks = pattern_match(eval_tasks, tasks.ALL_TASKS)
print(f"Found tasks: {eval_tasks}")
# **HACK INCOMING**:
# first get task dict on local main rank
# the tasks are downloaded *as they are initialized*, and the downloads don't like multithreading.
# so we download them once on the local main rank, wait, and then initialize them on all other ranks, which *should* load from the cache.
if self.fabric.local_rank == 0:
tasks.get_task_dict(eval_tasks)
# torch barrier
self.fabric.barrier()
tasks.get_task_dict(eval_tasks)
lm = self
if not no_cache:
lm = base.CachingLM(lm, "lm_cache/lit-gpt.db")
results = evaluator.evaluate(
lm=lm,
task_dict=tasks.get_task_dict(eval_tasks),
num_fewshot=num_fewshot,
limit=limit,
bootstrap_iters=bootstrap_iters,
)
results["config"] = dict(
model=self.model.config.name,
batch_size=self.batch_size,
device=str(self.device),
num_fewshot=num_fewshot,
limit=limit,
bootstrap_iters=bootstrap_iters,
no_cache=no_cache,
)
return results
@torch.inference_mode()
def run_eval_harness(
checkpoint_dir: Path,
precision: Optional[str] = None,
quantize: Optional[Literal["bnb.nf4", "bnb.nf4-dq", "bnb.fp4", "bnb.fp4-dq", "bnb.int8", "gptq.int4"]] = None,
eval_tasks: List[str] = ["arc_challenge", "piqa", "hellaswag", "hendrycksTest-*"],
save_filepath: Optional[Path] = None,
num_fewshot: int = 0,
limit: Optional[int] = None,
bootstrap_iters: int = 100000,
no_cache: bool = True,
):
if precision is None:
precision = get_default_supported_precision(training=False)
plugins = None
if quantize is not None and quantize.startswith("bnb."):
if "mixed" in precision:
raise ValueError("Quantization and mixed precision is not supported.")
dtype = {"16-true": torch.float16, "bf16-true": torch.bfloat16, "32-true": torch.float32}[precision]
plugins = BitsandbytesPrecision(quantize[4:], dtype)
precision = None
fabric = L.Fabric(devices=1, precision=precision, plugins=plugins)
check_valid_checkpoint_dir(checkpoint_dir)
tokenizer = Tokenizer(checkpoint_dir)
config = Config.from_json(checkpoint_dir / "lit_config.json")
if quantize == "gptq.int4":
model_file = "lit_model_gptq.4bit.pth"
if not (checkpoint_dir / model_file).is_file():
raise ValueError("Please run `python quantize/gptq.py` first")
else:
model_file = "lit_model.pth"
checkpoint_path = checkpoint_dir / model_file
print(f"Loading model {str(checkpoint_path)!r} with {config.__dict__}", file=sys.stderr)
with fabric.init_module(empty_init=True), gptq_quantization(quantize == "gptq.int4"):
model = GPT(config)
model.eval()
model = fabric.setup_module(model)
|
# support running without installing as a package
wd = Path(__file__).parent.parent.resolve()
sys.path.append(str(wd))
class EvalHarnessBase(BaseLM):
    """Adapter exposing a lit-gpt ``GPT`` model through lm-eval's ``BaseLM`` API.

    Wraps a Fabric-managed model/tokenizer pair so ``evaluator.evaluate`` can
    drive it for loglikelihood and generation tasks.
    """
    # Credits:
    # https://github.com/EleutherAI/gpt-neox/blob/main/eval_tasks/eval_adapter.py
    def __init__(self, fabric: L.Fabric, model: GPT, tokenizer: Tokenizer, batch_size: int):
        super().__init__()
        self.fabric = fabric
        self.model = model
        self.tokenizer = tokenizer
        self.batch_size_per_gpu = batch_size
        # Pre-allocate the KV cache once for the fixed eval batch size.
        with fabric.init_tensor():
            model.set_kv_cache(batch_size=batch_size)
    @classmethod
    def create_from_arg_string(cls, arg_string, additional_config=None):
        # Parse "k1=v1,k2=v2" CLI-style strings into constructor kwargs.
        # NOTE(review): `**additional_config` raises TypeError when it is left
        # as None — callers presumably always pass a dict; confirm.
        kwargs = {el.split("=")[0]: el.split("=")[1] for el in arg_string.split(",")}
        return cls(**kwargs, **additional_config)
    @property
    def eot_token_id(self):
        # we use EOT because end of *text* is more accurate for what we're doing than end of *sentence*
        return self.tokenizer.eos_id
    @property
    def max_length(self):
        # Maximum context length the wrapped model accepts.
        return self.model.max_seq_length
    @property
    def vocab_size(self):
        return self.tokenizer.vocab_size
    @property
    def max_gen_toks(self):
        # Hard cap on tokens generated per request by the harness.
        return 256
    @property
    def batch_size(self):
        # Effective global batch size across all Fabric processes.
        return self.batch_size_per_gpu * self.fabric.world_size
    @property
    def device(self):
        return self.fabric.device
    def tok_encode(self, string: str) -> List[int]:
        """Encode raw text to token ids without BOS/EOS markers."""
        return self.tokenizer.encode(string, bos=False, eos=False).tolist()
    def tok_decode(self, tokens: List[int]) -> str:
        """Decode a list of token ids back to text."""
        t = torch.tensor(tokens)
        return self.tokenizer.decode(t)
    @torch.inference_mode()
    def _model_call(self, inps):
        # Forward pass returning logits for a batch of token-id tensors.
        return self.model(inps)
    def _model_generate(self, context, max_length, eos_token_id) -> torch.Tensor:
        """Greedy/sampled generation for a single prompt; resets the KV cache after."""
        # this only supports batch size 1
        assert context.shape[0] == 1
        out = generate(self.model, context[0], max_length, eos_id=eos_token_id)
        # Clear per-block KV caches so the next request starts fresh.
        for block in self.model.transformer.h:
            block.attn.kv_cache.reset_parameters()
        return out.unsqueeze(0)
    @torch.inference_mode()
    def run_eval(
        self, eval_tasks: List[str], num_fewshot: int, limit: Optional[int], bootstrap_iters: int, no_cache: bool
    ) -> Dict:
        """Resolve task name patterns, run lm-eval, and return its results dict."""
        # Returns a list containing all values of the task registry that
        # match at least one of the patterns
        def pattern_match(patterns, source_list):
            task_names = set()
            for pattern in patterns:
                for matching in fnmatch.filter(source_list, pattern):
                    task_names.add(matching)
            return list(task_names)
        eval_tasks = pattern_match(eval_tasks, tasks.ALL_TASKS)
        print(f"Found tasks: {eval_tasks}")
        # **HACK INCOMING**:
        # first get task dict on local main rank
        # the tasks are downloaded *as they are initialized*, and the downloads don't like multithreading.
        # so we download them once on the local main rank, wait, and then initialize them on all other ranks, which *should* load from the cache.
        if self.fabric.local_rank == 0:
            tasks.get_task_dict(eval_tasks)
        # torch barrier
        self.fabric.barrier()
        tasks.get_task_dict(eval_tasks)
        lm = self
        if not no_cache:
            # Optional on-disk memoization of per-request results.
            lm = base.CachingLM(lm, "lm_cache/lit-gpt.db")
        results = evaluator.evaluate(
            lm=lm,
            task_dict=tasks.get_task_dict(eval_tasks),
            num_fewshot=num_fewshot,
            limit=limit,
            bootstrap_iters=bootstrap_iters,
        )
        # Record the run configuration alongside the metrics for reproducibility.
        results["config"] = dict(
            model=self.model.config.name,
            batch_size=self.batch_size,
            device=str(self.device),
            num_fewshot=num_fewshot,
            limit=limit,
            bootstrap_iters=bootstrap_iters,
            no_cache=no_cache,
        )
        return results
@torch.inference_mode()
def run_eval_harness(
checkpoint_dir: Path,
precision: Optional[str] = None,
quantize: Optional[Literal["bnb.nf4", "bnb.nf4-dq", "bnb.fp4", "bnb.fp4-dq", "bnb.int8", "gptq.int4"]] = None,
eval_tasks: List[str] = ["arc_challenge", "piqa", "hellaswag", "hendrycksTest-*"],
save_filepath: Optional[Path] = None,
num_fewshot: int = 0,
limit: Optional[int] = None,
bootstrap_iters: int = 100000,
no_cache: bool = True,
):
if precision is None:
precision = get_default_supported_precision(training=False)
plugins = None
if quantize is not None and quantize.startswith("bnb."):
if "mixed" in precision:
raise ValueError("Quantization and mixed precision is not supported.")
dtype = {"16-true": torch.float16, "bf16-true": torch.bfloat16, "32-true": torch.float32}[precision]
plugins = BitsandbytesPrecision(quantize[4:], dtype)
precision = None
fabric = L.Fabric(devices=1, precision=precision, plugins=plugins)
check_valid_checkpoint_dir(checkpoint_dir)
tokenizer = Tokenizer(checkpoint_dir)
config = Config.from_json(checkpoint_dir / "lit_config.json")
if quantize == "gptq.int4":
model_file = "lit_model_gptq.4bit.pth"
if not (checkpoint_dir / model_file).is_file():
raise ValueError("Please run `python quantize/gptq.py` first")
else:
model_file = "lit_model.pth"
checkpoint_path = checkpoint_dir / model_file
print(f"Loading model {str(checkpoint_path)!r} with {config.__dict__}", file=sys.stderr)
with fabric.init_module(empty_init=True), gptq_quantization(quantize == "gptq.int4"):
model = GPT(config)
model.eval()
model = fabric.setup_module(model)
| load_checkpoint(fabric, model, checkpoint_path) | 7 | 2023-11-22 06:56:19+00:00 | 12k |
Luo-Z13/pointobb | PointOBB/mmdet/models/detectors/PointOBB.py | [
{
"identifier": "DETECTORS",
"path": "PointOBB/mmdet/models/builder.py",
"snippet": "DETECTORS = MODELS"
},
{
"identifier": "TwoStageDetector",
"path": "PointOBB/mmdet/models/detectors/two_stage.py",
"snippet": "class TwoStageDetector(BaseDetector):\n \"\"\"Base class for two-stage de... | import copy
import torch
import numpy as np
import copy
import math
import cv2
import os
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
from mmdet.core.bbox import bbox_xyxy_to_cxcywh
from mmdet.core import bbox_cxcywh_to_xyxy
from mmdet.core.bbox.iou_calculators import bbox_overlaps
from ..builder import build_head
from torch.nn import functional as F
from ..builder import HEADS, build_loss
from typing import Tuple, Union
from torch import Tensor
from torch.nn.functional import grid_sample
from torchvision import transforms
from .P2BNet import gen_proposals_from_cfg
from .utils import resize_proposal, resize_single_proposal, flip_tensor, hboxlist2cxcywha \
,merge_batch_list, split_batch_list, box_iou_rotated, obb2poly_np | 9,067 | pps_angle = pps_angle.unsqueeze(2).expand((pps_center.size()[0], pps_center.size()[1], pps_center.size()[2], 1))
pps = torch.cat([pps_center, pps_wh, pps_angle], dim=-1)
pps = pps.reshape(pps.shape[0], -1, 5)
pps_new.append(pps.reshape(*pps_old.shape[0:2], -1, 5))
pps_new = torch.cat(pps_new, dim=2)
else:
pps_new = pps_old
h, w, _ = img_meta[i]['img_shape']
if cut_mode is 'clamp':
pps_new[..., 0:4:2] = torch.clamp(pps_new[..., 0:4:2], 0, w)
pps_new[..., 1:4:2] = torch.clamp(pps_new[..., 1:4:2], 0, h)
proposals_valid_list.append(pps_new.new_full(
(*pps_new.shape[0:3], 1), 1, dtype=torch.long).reshape(-1, 1))
else:
rot_theta = base_boxes[:,-1].mean()
img_xywh = pps_new.new_tensor([w/2, h/2, w, h, rot_theta]) # (cx,cy,w,h,theta)
iof_in_img = box_iou_rotated(pps_new.reshape(-1, 5), img_xywh.unsqueeze(0), mode='iof')
proposals_valid = iof_in_img > 0.8
proposals_valid_list.append(proposals_valid)
proposal_list.append(pps_new.reshape(-1, 5))
return proposal_list, proposals_valid_list
def gen_rotate_negative_proposals(gt_points, proposal_cfg, aug_generate_proposals, img_meta):
num_neg_gen = proposal_cfg['gen_num_neg']
if num_neg_gen == 0:
return None, None
neg_proposal_list = []
neg_weight_list = []
device = gt_points[0].device
for i in range(len(gt_points)):
pos_box = aug_generate_proposals[i]
h, w, _ = img_meta[i]['img_shape']
x1 = -0.2 * w + torch.rand(num_neg_gen) * (1.2 * w)
y1 = -0.2 * h + torch.rand(num_neg_gen) * (1.2 * h)
x2 = x1 + torch.rand(num_neg_gen) * (1.2 * w - x1)
y2 = y1 + torch.rand(num_neg_gen) * (1.2 * h - y1)
neg_theta = torch.ones_like(x1)*(pos_box[:,-1].mean().cpu())
neg_bboxes = torch.stack([(x1 + x2) / 2, (y1 + y2) / 2,
x2 - x1, y2 - y1, neg_theta], dim=1).to(device)
iou = box_iou_rotated(neg_bboxes, pos_box)
neg_weight = ((iou < 0.3).sum(dim=1) == iou.shape[1])
neg_proposal_list.append(neg_bboxes)
neg_weight_list.append(neg_weight)
return neg_proposal_list, neg_weight_list
def resize_rotate_proposal(img_metas,
                           batch_gt_bboxes,
                           batch_proposals,
                           gt_true_bboxes,
                           gt_bboxes_ignore,
                           ratio=0.5):
    '''
    Rescale rotated boxes and image metas by `ratio`.

    Every box is [cx, cy, w, h, a]; the first four entries are scaled and
    the angle is preserved.  Returns updated img_metas plus per-image lists
    of rescaled gt boxes, proposals, true boxes and ignore boxes.
    '''
    img_meta_out = copy.deepcopy(img_metas)
    batch_gt_bboxes_out = []
    batch_proposals_out = []
    gt_true_bboxes_out = []
    gt_bboxes_ignore_out = []
    for i in range(len(img_metas)):
        h, w, c = img_metas[i]['img_shape']
        # ceil keeps shapes integral after fractional scaling
        img_meta_out[i]['img_shape'] = (math.ceil(h * ratio), math.ceil(w * ratio), c)
        img_meta_out[i]['pad_shape'] = (math.ceil(h * ratio), math.ceil(w * ratio), c)
        tmp_gt_bboxes = batch_gt_bboxes[i].clone()
        tmp_gt_bboxes[:, :4] = tmp_gt_bboxes[:, :4] * ratio
        batch_gt_bboxes_out.append(tmp_gt_bboxes)
        tmp_proposal = batch_proposals[i].clone()
        tmp_proposal[:, :4] = tmp_proposal[:, :4] * ratio
        batch_proposals_out.append(tmp_proposal)
        tmp_gt_true_bbox = gt_true_bboxes[i].clone()
        tmp_gt_true_bbox[:, :4] = tmp_gt_true_bbox[:, :4] * ratio
        gt_true_bboxes_out.append(tmp_gt_true_bbox)
        tmp_gt_bboxes_ignore = gt_bboxes_ignore[i].clone()
        if tmp_gt_bboxes_ignore.size(0) != 0:
            # Bug fix: the original indexed a 2-D [N, 5] tensor with three
            # slices (`[:, :, :4]`), which raises IndexError for any
            # non-empty ignore tensor; scale the first four columns instead.
            tmp_gt_bboxes_ignore[:, :4] = tmp_gt_bboxes_ignore[:, :4] * ratio
        gt_bboxes_ignore_out.append(tmp_gt_bboxes_ignore)
    return img_meta_out, batch_gt_bboxes_out, batch_proposals_out, gt_true_bboxes_out, gt_bboxes_ignore_out
@DETECTORS.register_module()
class PointOBB(TwoStageDetector):
def __init__(self,
backbone,
roi_head,
train_cfg,
test_cfg,
construct_view = True,
construct_resize = False,
loss_diff_view=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0),
crop_size = (1024, 1024),
padding = 'reflection',
view_range: Tuple[float, float] = (0.25, 0.75),
bbox_head=None,
neck=None,
pretrained=None,
init_cfg=None):
super(PointOBB, self).__init__(
backbone=backbone,
neck=neck,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
self.num_stages = roi_head.num_stages
self.stage = 0
print(f'========={self.stage}===========')
if bbox_head is not None:
self.with_bbox_head = True
self.bbox_head = build_head(bbox_head)
self.crop_size = crop_size
self.padding = padding
self.view_range = view_range
|
# from mmdet.datasets.utils import obb2poly_np
def resize_image(inputs, resize_ratio=0.5):
    """Spatially rescale a batched image tensor via nearest-neighbour sampling."""
    return F.interpolate(inputs, scale_factor=resize_ratio, mode='nearest')
def fine_rotate_proposals_from_cfg(pseudo_boxes, fine_proposal_cfg, img_meta, stage):
    """Refine rotated pseudo boxes into dense proposal bags per image.

    For every pseudo box [cx, cy, w, h, a], scale w/h by each pair of
    `base_ratios` and optionally jitter the centre by each `shake_ratio`
    in four directions.  A proposal is marked valid when its IoF with the
    (rotated) image rectangle exceeds 0.8.

    Returns:
        (proposal_list, proposals_valid_list): per-image [N, 5] proposals
        and matching validity masks.
    NOTE(review): only gen_proposal_mode == 'fix_gen' is handled; any other
    mode falls through to the return with the lists undefined — confirm
    configs always use 'fix_gen'.
    """
    gen_mode = fine_proposal_cfg['gen_proposal_mode']
    # cut_mode = fine_proposal_cfg['cut_mode']
    cut_mode = None
    # Per-stage ratios when tuples are configured, otherwise shared values.
    if isinstance(fine_proposal_cfg['base_ratios'], tuple):
        base_ratios = fine_proposal_cfg['base_ratios'][stage - 1]
        shake_ratio = fine_proposal_cfg['shake_ratio'][stage - 1]
    else:
        base_ratios = fine_proposal_cfg['base_ratios']
        shake_ratio = fine_proposal_cfg['shake_ratio']
    if gen_mode == 'fix_gen':
        proposal_list = []
        proposals_valid_list = []
        for i in range(len(img_meta)):
            pps = []
            base_boxes = pseudo_boxes[i]
            # Cartesian product of width/height scalings.
            for ratio_w in base_ratios:
                for ratio_h in base_ratios:
                    base_boxes_ = base_boxes.clone()
                    base_boxes_[:, 2] *= ratio_w
                    base_boxes_[:, 3] *= ratio_h
                    pps.append(base_boxes_.unsqueeze(1))
            pps_old = torch.cat(pps, dim=1)
            if shake_ratio is not None:
                pps_new = []
                pps_new.append(pps_old.reshape(*pps_old.shape[0:2], -1, 5))
                # Jitter each centre left/right/up/down by ratio * (w, h).
                for ratio in shake_ratio:
                    pps = pps_old.clone()
                    pps_center = pps[:, :, :2]
                    pps_wh = pps[:, :, 2:4]
                    pps_angle = pps[:, :, 4].unsqueeze(2)
                    pps_x_l = pps_center[:, :, 0] - ratio * pps_wh[:, :, 0]
                    pps_x_r = pps_center[:, :, 0] + ratio * pps_wh[:, :, 0]
                    pps_y_t = pps_center[:, :, 1] - ratio * pps_wh[:, :, 1]
                    pps_y_d = pps_center[:, :, 1] + ratio * pps_wh[:, :, 1]
                    pps_center_l = torch.stack([pps_x_l, pps_center[:, :, 1]], dim=-1)
                    pps_center_r = torch.stack([pps_x_r, pps_center[:, :, 1]], dim=-1)
                    pps_center_t = torch.stack([pps_center[:, :, 0], pps_y_t], dim=-1)
                    pps_center_d = torch.stack([pps_center[:, :, 0], pps_y_d], dim=-1)
                    pps_center = torch.stack([pps_center_l, pps_center_r, pps_center_t, pps_center_d], dim=2)
                    pps_wh = pps_wh.unsqueeze(2).expand(pps_center.shape)
                    pps_angle = pps_angle.unsqueeze(2).expand((pps_center.size()[0], pps_center.size()[1], pps_center.size()[2], 1))
                    pps = torch.cat([pps_center, pps_wh, pps_angle], dim=-1)
                    pps = pps.reshape(pps.shape[0], -1, 5)
                    pps_new.append(pps.reshape(*pps_old.shape[0:2], -1, 5))
                pps_new = torch.cat(pps_new, dim=2)
            else:
                pps_new = pps_old
            h, w, _ = img_meta[i]['img_shape']
            # Bug fix: the original used `cut_mode is 'clamp'` — identity
            # comparison against a string literal (SyntaxWarning, fragile);
            # use equality.  Behavior is unchanged because cut_mode is None.
            if cut_mode == 'clamp':
                pps_new[..., 0:4:2] = torch.clamp(pps_new[..., 0:4:2], 0, w)
                pps_new[..., 1:4:2] = torch.clamp(pps_new[..., 1:4:2], 0, h)
                proposals_valid_list.append(pps_new.new_full(
                    (*pps_new.shape[0:3], 1), 1, dtype=torch.long).reshape(-1, 1))
            else:
                # Validity = IoF with a rotated rect covering the whole image,
                # using the mean angle of this image's base boxes.
                rot_theta = base_boxes[:, -1].mean()
                img_xywh = pps_new.new_tensor([w/2, h/2, w, h, rot_theta])  # (cx,cy,w,h,theta)
                iof_in_img = box_iou_rotated(pps_new.reshape(-1, 5), img_xywh.unsqueeze(0), mode='iof')
                proposals_valid = iof_in_img > 0.8
                proposals_valid_list.append(proposals_valid)
            proposal_list.append(pps_new.reshape(-1, 5))
    return proposal_list, proposals_valid_list
def gen_rotate_negative_proposals(gt_points, proposal_cfg, aug_generate_proposals, img_meta):
    """Randomly sample rotated negative boxes per image.

    Samples `gen_num_neg` axis-random rectangles (slightly beyond the image
    border, rotated by the mean angle of that image's positive proposals) and
    marks as usable negatives those whose rotated IoU with every positive box
    is below 0.3.

    Returns:
        (neg_proposal_list, neg_weight_list) per image, or (None, None)
        when `gen_num_neg` is 0.
    """
    num_neg_gen = proposal_cfg['gen_num_neg']
    if num_neg_gen == 0:
        return None, None
    neg_proposal_list = []
    neg_weight_list = []
    device = gt_points[0].device
    for i in range(len(gt_points)):
        pos_box = aug_generate_proposals[i]
        h, w, _ = img_meta[i]['img_shape']
        # Corners drawn from [-0.2*w, w] / [-0.2*h, h] so negatives may
        # partially hang off the image.
        x1 = -0.2 * w + torch.rand(num_neg_gen) * (1.2 * w)
        y1 = -0.2 * h + torch.rand(num_neg_gen) * (1.2 * h)
        x2 = x1 + torch.rand(num_neg_gen) * (1.2 * w - x1)
        y2 = y1 + torch.rand(num_neg_gen) * (1.2 * h - y1)
        # All negatives share the mean angle of this image's positives.
        neg_theta = torch.ones_like(x1)*(pos_box[:,-1].mean().cpu())
        # (x1,y1,x2,y2) -> (cx,cy,w,h,theta)
        neg_bboxes = torch.stack([(x1 + x2) / 2, (y1 + y2) / 2,
                                  x2 - x1, y2 - y1, neg_theta], dim=1).to(device)
        iou = box_iou_rotated(neg_bboxes, pos_box)
        # Keep only negatives that overlap no positive above 0.3 IoU.
        neg_weight = ((iou < 0.3).sum(dim=1) == iou.shape[1])
        neg_proposal_list.append(neg_bboxes)
        neg_weight_list.append(neg_weight)
    return neg_proposal_list, neg_weight_list
def resize_rotate_proposal(img_metas,
                           batch_gt_bboxes,
                           batch_proposals,
                           gt_true_bboxes,
                           gt_bboxes_ignore,
                           ratio=0.5):
    '''
    Rescale rotated boxes and image metas by `ratio`.

    Every box is [cx, cy, w, h, a]; the first four entries are scaled and
    the angle is preserved.  Returns updated img_metas plus per-image lists
    of rescaled gt boxes, proposals, true boxes and ignore boxes.
    '''
    img_meta_out = copy.deepcopy(img_metas)
    batch_gt_bboxes_out = []
    batch_proposals_out = []
    gt_true_bboxes_out = []
    gt_bboxes_ignore_out = []
    for i in range(len(img_metas)):
        h, w, c = img_metas[i]['img_shape']
        # ceil keeps shapes integral after fractional scaling
        img_meta_out[i]['img_shape'] = (math.ceil(h * ratio), math.ceil(w * ratio), c)
        img_meta_out[i]['pad_shape'] = (math.ceil(h * ratio), math.ceil(w * ratio), c)
        tmp_gt_bboxes = batch_gt_bboxes[i].clone()
        tmp_gt_bboxes[:, :4] = tmp_gt_bboxes[:, :4] * ratio
        batch_gt_bboxes_out.append(tmp_gt_bboxes)
        tmp_proposal = batch_proposals[i].clone()
        tmp_proposal[:, :4] = tmp_proposal[:, :4] * ratio
        batch_proposals_out.append(tmp_proposal)
        tmp_gt_true_bbox = gt_true_bboxes[i].clone()
        tmp_gt_true_bbox[:, :4] = tmp_gt_true_bbox[:, :4] * ratio
        gt_true_bboxes_out.append(tmp_gt_true_bbox)
        tmp_gt_bboxes_ignore = gt_bboxes_ignore[i].clone()
        if tmp_gt_bboxes_ignore.size(0) != 0:
            # Bug fix: the original indexed a 2-D [N, 5] tensor with three
            # slices (`[:, :, :4]`), which raises IndexError for any
            # non-empty ignore tensor; scale the first four columns instead.
            tmp_gt_bboxes_ignore[:, :4] = tmp_gt_bboxes_ignore[:, :4] * ratio
        gt_bboxes_ignore_out.append(tmp_gt_bboxes_ignore)
    return img_meta_out, batch_gt_bboxes_out, batch_proposals_out, gt_true_bboxes_out, gt_bboxes_ignore_out
@DETECTORS.register_module()
class PointOBB(TwoStageDetector):
def __init__(self,
backbone,
roi_head,
train_cfg,
test_cfg,
construct_view = True,
construct_resize = False,
loss_diff_view=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0),
crop_size = (1024, 1024),
padding = 'reflection',
view_range: Tuple[float, float] = (0.25, 0.75),
bbox_head=None,
neck=None,
pretrained=None,
init_cfg=None):
super(PointOBB, self).__init__(
backbone=backbone,
neck=neck,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
self.num_stages = roi_head.num_stages
self.stage = 0
print(f'========={self.stage}===========')
if bbox_head is not None:
self.with_bbox_head = True
self.bbox_head = build_head(bbox_head)
self.crop_size = crop_size
self.padding = padding
self.view_range = view_range | self.loss_diff_view = build_loss(loss_diff_view) | 4 | 2023-11-20 07:50:12+00:00 | 12k |
ModelTC/EasyLLM | llm/runners/hf_runner.py | [
{
"identifier": "load_yaml",
"path": "llm/utils/general/yaml_loader.py",
"snippet": "def load_yaml(path):\n with open(path, \"r\")as f:\n yaml_data = yaml.load(f, IncludeLoader)\n # TODO check_cfg\n # cfg check\n return yaml_data"
},
{
"identifier": "parse_args",
"path": "... | import torch
import deepspeed
from torch.nn.parallel import DistributedDataParallel as DDP
from llm.utils.general.yaml_loader import load_yaml
from llm.utils.general.parser_helper import parse_args
from llm.utils.model.optimizer_helper import build_optimizer
from llm.utils.model.lr_helper import build_learning_rate_scheduler
from llm.utils.general.hook_helper import build_hooks
from llm.utils.general.log_helper import default_logger as logger
from llm.data.tokenizer import build_tokenizer
from llm.utils.env.hf_dist_helper import (
setup_distributed,
get_world_size
)
from llm.utils.general.hf_build_utils import (
build_batch_collator,
build_dataloader,
build_dataset,
build_model,
hack_model,
build_augmentation
)
from llm.utils.general.hf_utils import (
hf_inference,
hf_inference_multimodal,
load_from_ds,
load_from_hf,
save_hf_checkpoint,
save_ds_checkpoints
)
from llm.utils.general.grad_scaler import ShardedGradScaler | 7,541 | self.gradient_accumulation_steps = ds_config['gradient_accumulation_steps']
self.global_train_batch_size *= self.gradient_accumulation_steps
self.train_epoch_size //= self.gradient_accumulation_steps
if 'train_batch_size' not in ds_config or ds_config['train_batch_size'] == 'auto':
ds_config['train_batch_size'] = self.global_train_batch_size
if 'train_micro_batch_size_per_gpu' not in ds_config or ds_config['train_micro_batch_size_per_gpu'] == 'auto':
ds_config['train_micro_batch_size_per_gpu'] = self.mirco_train_batch_size
model, optimizer, _, lr_scheduler = deepspeed.initialize(
model=self.model,
optimizer=self.optimizer,
lr_scheduler=self.lr_scheduler,
config=self.config['deepspeed']['config'],
args=None,
)
self.model = model
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
def load_checkpoints(self, load_cfg):
if load_cfg.get('enabled', False):
load_dir = load_cfg.get("load_path", None)
mode = load_cfg.get('load_mode', 'hf')
if not load_dir:
logger.info("No weights need to be loaded.")
return
logger.info(f"Loading model from {load_dir}")
if mode == 'huggingface':
try:
if self.config['model'].get('mode', "from_pretrained") == "from_config":
load_from_hf(self, load_cfg)
except: # noqa
logger.warning("Loading failed by huggingface")
elif mode == 'deepspeed':
try:
load_from_ds(self, load_cfg)
except: # noqa
logger.warning("Loading failed by deepspeed")
else:
raise NotImplementedError
def build_data(self):
self.data_loaders = {}
for data_type in self.config['data'].get('data_types', []):
dataset_cfg = self.config['data'][data_type]['dataset']
dataset = build_dataset(dataset_cfg, self.tokenizer)
batch_collector_cfg = self.config['data'][data_type]['batch_collector']
batch_collector_cfg['kwargs']['offset_label'] = False
batch_collector = build_batch_collator(batch_collector_cfg, self.tokenizer)
if data_type == 'val' or data_type == 'test':
self.config['data'][data_type]['batch_sampler']['infinite'] = False
self.config['data'][data_type]['batch_sampler']['kwargs']['sampler']['type'] = 'dist_test'
data_loader = build_dataloader(self.config['data'][data_type], dataset, batch_collector)
self.data_loaders[data_type] = data_loader
def batch2device(self, batch):
batch['input_ids'] = batch['input_ids'].to(device=torch.device('cuda'))
batch['labels'] = batch['labels'].to(device=torch.device('cuda'))
batch['attention_mask'] = batch['attention_mask'].to(device=torch.device('cuda'))
return batch
def get_batch(self, batch_type='train'):
assert batch_type in self.data_loaders
if not hasattr(self, 'data_iterators'):
self.data_iterators = {}
if batch_type not in self.data_iterators:
iterator = self.data_iterators[batch_type] = iter(self.data_loaders[batch_type])
else:
iterator = self.data_iterators[batch_type]
try:
batch = next(iterator)
except StopIteration as e: # noqa
iterator = self.data_iterators[batch_type] = iter(self.data_loaders[batch_type])
batch = next(iterator)
batch = self.batch2device(batch)
return batch
def _save(self, iteration):
if (iteration + 1) % self.save_interval == 0:
self.save_checkpoint(self.config.get('saver', {}), iteration + 1)
def train(self):
self.model.train()
self._hooks('before_train')
for iteration in range(
self.start_iter * self.gradient_accumulation_steps,
self.train_iters * self.gradient_accumulation_steps,
):
self.cur_iter = iteration // self.gradient_accumulation_steps
batch = self.get_batch()
self._hooks('before_train_iter', self.cur_iter, batch)
with torch.cuda.amp.autocast(enabled=True, dtype=self.dtype):
output = self.model(batch['input_ids'],
batch['attention_mask'],
labels=batch['labels'],
return_dict=True,
use_cache=False)
losses = [val for name, val in output.items() if name.find('loss') >= 0]
loss = sum(losses)
if self.deepspeed:
self.model.backward(loss)
self.model.step()
else:
self.scaler.scale(loss).backward()
self.scaler.step(self.optimizer)
self.scaler.update()
self.optimizer.zero_grad()
self.lr_scheduler.step()
if (iteration + 1) % self.gradient_accumulation_steps == 0:
self._save(self.cur_iter)
self._hooks('after_train_iter', self.cur_iter, output)
save_hf_checkpoint(self, self.config['saver'], self.train_iters)
self._hooks('after_train')
def infer(self):
self.model.eval()
self.model.cuda()
device = self.model.device
assert 'infer_tokenization' in self.config, "infer_tokenization does not exist."
self.config['infer_tokenization']['kwargs'].update({'tokenizer': self.tokenizer})
|
class HFRunner(object):
    def __init__(self, args, cfg=None, training=True):
        """Set up a HuggingFace-style runner from a parsed yaml config.

        Args:
            args: parsed CLI namespace (stored for reference).
            cfg: configuration dict; optional 'deepspeed' and 'runtime'
                sections plus everything `build()` consumes.
            training: True builds the training pipeline, False the inference one.
        """
        self.args = args
        self.config = cfg
        self.training = training
        # Defaults when no deepspeed section exists: plain DDP + fp16 autocast.
        self.deepspeed = False
        self.dtype = torch.float16
        if 'deepspeed' in self.config:
            self.deepspeed = self.config['deepspeed'].get('enabled', False)
            self.dtype = self.get_dtype_from_ds(self.config['deepspeed']['config'])
        if 'runtime' not in self.config:
            self.config['runtime'] = {}
        self.gradient_accumulation_steps = self.config['runtime'].get('gradient_accumulation_steps', 1)
        self.start_iter = 0
        self.build()
        if not self.deepspeed:
            # Sharded grad scaler provides loss scaling on the non-deepspeed path.
            self.scaler = ShardedGradScaler(enabled=True)
        if self.training:
            logger.info(f"Start_iter: {self.start_iter}")
            logger.info(f"Train_iters: {self.train_iters}")
            logger.info(f"Train_epoch_size: {self.train_epoch_size}")
            logger.info(f"Total epoch: {self.get_max_train_epoch()}")
            logger.info(f"Gradient_accumulation_steps: {self.gradient_accumulation_steps}")
            logger.info(f"Global_train_batch_size: {self.global_train_batch_size}")
def get_dtype_from_ds(self, ds_confg):
bf16 = False
fp16 = False
if 'bf16' in ds_confg:
bf16 = ds_confg['bf16'].get('enabled', False)
if 'fp16' in ds_confg:
fp16 = ds_confg['fp16'].get('enabled', False)
assert bf16 != fp16
if bf16:
return torch.bfloat16
if fp16:
return torch.float16
    def build(self):
        """Construct tokenizer, model, hooks, data and trainer, then load weights.

        Order matters: the trainer needs the data loaders, and deepspeed
        wrapping must happen before checkpoints are loaded.
        """
        self.build_tokenizer()
        self.build_model()
        self.build_hooks()
        self.build_data()
        self.build_trainer()
        if self.deepspeed and self.training:
            self.deepspeed_init()
        self.load_checkpoints(self.config['loader'])
def get_cur_train_epoch(self):
epoch = (self.cur_iter // self.train_epoch_size) + 1
return epoch
def get_max_train_epoch(self):
epoch = (max(self.train_iters - 1, 1)) // self.train_epoch_size + 1
return epoch
    def build_optimzer(self):
        """Build the optimizer from the trainer config.

        NOTE(review): the name misspells "optimizer", but `build()` calls it
        by this name, so it cannot be renamed without touching the caller.
        """
        optimizer_cfg = self.config['trainer']['optimizer']
        self.optimizer = build_optimizer(optimizer_cfg, self.model)
    def build_lr_scheduler(self):
        """Build the LR scheduler; requires `self.optimizer` to exist already."""
        lr_scheduler_cfg = self.config['trainer']['lr_scheduler']
        self.lr_scheduler = build_learning_rate_scheduler(lr_scheduler_cfg, self.optimizer)
    def build_tokenizer(self):
        """Build the tokenizer from its config section."""
        self.tokenizer = build_tokenizer(self.config['tokenizer'])
def build_model(self):
self.model = build_model(self.config['model'])
if self.config['runtime'].get('gradient_checkpointing', True):
if hasattr(self.model, "gradient_checkpointing_disable"):
self.model.gradient_checkpointing_enable()
if hasattr(self.model, "base_model"):
self.model.base_model.gradient_checkpointing_enable()
if self.config['model'].get('peft_model_cfg', None) is not None:
modules_to_save = self.config['model']['peft_model_cfg'].get('modules_to_save', [])
if len(modules_to_save) == 0:
hack_model(self.model)
if not self.deepspeed:
self.mdoel = self.model.cuda()
if self.training:
self.model = DDP(self.model,
broadcast_buffers=False,
find_unused_parameters=False)
    def build_trainer(self):
        """Derive batch-size/epoch bookkeeping and (for training) optimizer+scheduler.

        NOTE(review): "mirco" is a recurring misspelling of "micro"; the
        attribute names are read elsewhere (e.g. deepspeed_init), so they
        must stay as-is.
        """
        world_size = get_world_size()
        if self.training:
            self.train_iters = self.config['trainer']['train_iters']
            self.save_interval = self.config['saver'].get('save_interval', 100)
            self.build_optimzer()
            self.build_lr_scheduler()
            self.mirco_train_batch_size = self.data_loaders['train'].batch_sampler.batch_size
            self.train_epoch_size = self.data_loaders['train'].get_epoch_size()
            # Global batch = per-GPU micro batch * number of ranks.
            self.global_train_batch_size = self.mirco_train_batch_size * world_size
        else:
            if 'test' in self.data_loaders:
                self.mirco_test_batch_size = self.data_loaders['test'].batch_sampler.batch_size
                self.test_epoch_size = self.data_loaders['test'].get_epoch_size()
            else:
                # No test loader configured: fall back to singleton sizes.
                self.mirco_test_batch_size = 1
                self.test_epoch_size = 1
            self.global_test_batch_size = self.mirco_test_batch_size * world_size
            self.global_train_batch_size = 1
    def build_hooks(self):
        """Instantiate the hook pipeline (logging etc.) from config."""
        cfg_hooks = self.config.get('hooks', [])
        self._hooks = build_hooks(self, cfg_hooks, is_train=self.training, add_log_if_not_exists=True)
        logger.info('build hooks done')
    def deepspeed_init(self):
        """Resolve 'auto' batch settings in the DS config and wrap model/optimizer.

        Mutates the deepspeed config in place, scales the global batch size and
        epoch size by the accumulation steps, then replaces model/optimizer/
        lr_scheduler with their deepspeed-wrapped counterparts.
        """
        ds_config = self.config['deepspeed']['config']
        if ds_config.get('gradient_accumulation_steps', 'auto') == 'auto':
            ds_config['gradient_accumulation_steps'] = self.gradient_accumulation_steps
        self.gradient_accumulation_steps = ds_config['gradient_accumulation_steps']
        # Account for accumulation: larger effective batch, fewer optimizer steps per epoch.
        self.global_train_batch_size *= self.gradient_accumulation_steps
        self.train_epoch_size //= self.gradient_accumulation_steps
        if 'train_batch_size' not in ds_config or ds_config['train_batch_size'] == 'auto':
            ds_config['train_batch_size'] = self.global_train_batch_size
        if 'train_micro_batch_size_per_gpu' not in ds_config or ds_config['train_micro_batch_size_per_gpu'] == 'auto':
            ds_config['train_micro_batch_size_per_gpu'] = self.mirco_train_batch_size
        model, optimizer, _, lr_scheduler = deepspeed.initialize(
            model=self.model,
            optimizer=self.optimizer,
            lr_scheduler=self.lr_scheduler,
            config=self.config['deepspeed']['config'],
            args=None,
        )
        self.model = model
        self.optimizer = optimizer
        self.lr_scheduler = lr_scheduler
    def load_checkpoints(self, load_cfg):
        """Optionally load model weights per the loader config.

        NOTE(review): the default load_mode is 'hf', but the branches only
        accept 'huggingface' and 'deepspeed' — a config that enables loading
        without setting load_mode hits NotImplementedError.  Confirm whether
        'hf' should map to the huggingface branch.
        """
        if load_cfg.get('enabled', False):
            load_dir = load_cfg.get("load_path", None)
            mode = load_cfg.get('load_mode', 'hf')
            if not load_dir:
                logger.info("No weights need to be loaded.")
                return
            logger.info(f"Loading model from {load_dir}")
            if mode == 'huggingface':
                try:
                    # Only load here when the model was built from config
                    # (from_pretrained already carries its own weights).
                    if self.config['model'].get('mode', "from_pretrained") == "from_config":
                        load_from_hf(self, load_cfg)
                except: # noqa
                    # Deliberate best-effort: loading failure is logged, not fatal.
                    logger.warning("Loading failed by huggingface")
            elif mode == 'deepspeed':
                try:
                    load_from_ds(self, load_cfg)
                except: # noqa
                    # Deliberate best-effort: loading failure is logged, not fatal.
                    logger.warning("Loading failed by deepspeed")
            else:
                raise NotImplementedError
    def build_data(self):
        """Build one dataloader per configured data_type ('train'/'val'/'test').

        Eval splits get a finite, distributed test sampler; labels are not
        offset here (the model's forward handles the shift).
        """
        self.data_loaders = {}
        for data_type in self.config['data'].get('data_types', []):
            dataset_cfg = self.config['data'][data_type]['dataset']
            dataset = build_dataset(dataset_cfg, self.tokenizer)
            batch_collector_cfg = self.config['data'][data_type]['batch_collector']
            batch_collector_cfg['kwargs']['offset_label'] = False
            batch_collector = build_batch_collator(batch_collector_cfg, self.tokenizer)
            if data_type == 'val' or data_type == 'test':
                # Evaluation must iterate the split exactly once per rank.
                self.config['data'][data_type]['batch_sampler']['infinite'] = False
                self.config['data'][data_type]['batch_sampler']['kwargs']['sampler']['type'] = 'dist_test'
            data_loader = build_dataloader(self.config['data'][data_type], dataset, batch_collector)
            self.data_loaders[data_type] = data_loader
    def batch2device(self, batch):
        """Move the tensors consumed by the forward pass onto the current GPU."""
        batch['input_ids'] = batch['input_ids'].to(device=torch.device('cuda'))
        batch['labels'] = batch['labels'].to(device=torch.device('cuda'))
        batch['attention_mask'] = batch['attention_mask'].to(device=torch.device('cuda'))
        return batch
    def get_batch(self, batch_type='train'):
        """Return the next batch for `batch_type`, restarting exhausted iterators.

        Iterators are cached lazily per data type in `self.data_iterators`.
        """
        assert batch_type in self.data_loaders
        if not hasattr(self, 'data_iterators'):
            self.data_iterators = {}
        if batch_type not in self.data_iterators:
            iterator = self.data_iterators[batch_type] = iter(self.data_loaders[batch_type])
        else:
            iterator = self.data_iterators[batch_type]
        try:
            batch = next(iterator)
        except StopIteration as e: # noqa
            # Loader exhausted: start a fresh pass over the data.
            iterator = self.data_iterators[batch_type] = iter(self.data_loaders[batch_type])
            batch = next(iterator)
        batch = self.batch2device(batch)
        return batch
    def _save(self, iteration):
        # Persist a checkpoint every `save_interval` optimizer steps.
        if (iteration + 1) % self.save_interval == 0:
            self.save_checkpoint(self.config.get('saver', {}), iteration + 1)
    def train(self):
        """Main optimization loop.

        Runs ``train_iters`` optimizer steps, each composed of
        ``gradient_accumulation_steps`` micro-iterations, firing the
        registered hooks around training and around every iteration.
        Checkpoints periodically via ``_save`` and writes a final HF
        checkpoint when the loop finishes.
        """
        self.model.train()
        self._hooks('before_train')
        for iteration in range(
                self.start_iter * self.gradient_accumulation_steps,
                self.train_iters * self.gradient_accumulation_steps,
        ):
            # Optimizer-step index (micro-iterations collapsed).
            self.cur_iter = iteration // self.gradient_accumulation_steps
            batch = self.get_batch()
            self._hooks('before_train_iter', self.cur_iter, batch)
            with torch.cuda.amp.autocast(enabled=True, dtype=self.dtype):
                output = self.model(batch['input_ids'],
                                    batch['attention_mask'],
                                    labels=batch['labels'],
                                    return_dict=True,
                                    use_cache=False)
            # Total loss = sum of every model output whose name contains 'loss'.
            losses = [val for name, val in output.items() if name.find('loss') >= 0]
            loss = sum(losses)
            if self.deepspeed:
                # DeepSpeed engine owns loss scaling, accumulation and stepping.
                self.model.backward(loss)
                self.model.step()
            else:
                # NOTE(review): this branch steps the optimizer on every
                # micro-iteration and never divides the loss by
                # gradient_accumulation_steps, so accumulation is effectively
                # a no-op outside DeepSpeed — confirm whether that is intended.
                self.scaler.scale(loss).backward()
                self.scaler.step(self.optimizer)
                self.scaler.update()
                self.optimizer.zero_grad()
            self.lr_scheduler.step()
            if (iteration + 1) % self.gradient_accumulation_steps == 0:
                self._save(self.cur_iter)
            self._hooks('after_train_iter', self.cur_iter, output)
        save_hf_checkpoint(self, self.config['saver'], self.train_iters)
        self._hooks('after_train')
def infer(self):
self.model.eval()
self.model.cuda()
device = self.model.device
assert 'infer_tokenization' in self.config, "infer_tokenization does not exist."
self.config['infer_tokenization']['kwargs'].update({'tokenizer': self.tokenizer}) | sense_tokenization = build_augmentation(self.config["infer_tokenization"]) | 14 | 2023-11-26 10:12:52+00:00 | 12k |
dewgenenny/ScreenSync_v2 | screensync/ui.py | [
{
"identifier": "create_add_bulb_window",
"path": "screensync/screen_sync/ui/add_bulb.py",
"snippet": "def create_add_bulb_window(root, config_manager, refresh_callback):\n # Styles\n style = ttk.Style()\n style.configure('TLabel', background='#404957', foreground='white')\n style.configure(... | import tkinter as tk
import PIL
import os
import pkg_resources
import screensync.screen_sync.color_processing as color_processing
from tkinter import PhotoImage, Toplevel, Label, Entry, Button, Listbox,LabelFrame, ttk, messagebox, END
from PIL import Image, ImageTk
from platformdirs import *
from screensync.screen_sync.ui.add_bulb import create_add_bulb_window
from screensync.screen_sync.ui.remove_bulb import create_remove_bulb_button
from screensync.screen_sync.config_manager import ConfigManager
from screensync.screen_sync.bulb_factory import BulbFactory
from screensync.screen_sync.coordinator import Coordinator
from screensync.screen_sync.stats import runtime_stats
from screensync.screen_sync.graph import create_embedded_graph | 7,593 | # This function will need to be implemented with the actual save logic
print(f"Saving Saturation: {saturation_var.get()}, Capture Size: {capture_size_var.get()}")
def open_general_settings(config_manager):
    """Open a small dialog for editing the general settings.

    Args:
        config_manager: supplies ``get_general_settings()`` with the current
            values; persisting is delegated to ``save_general_settings``.
    """
    # Use the default Tk root as master: 'root' is a local variable of main()
    # and was previously referenced here as an undefined global (NameError).
    general_settings_window = Toplevel()
    general_settings_window.title("General Settings")
    general_settings_window.geometry('300x200')
    general_settings_window.configure(bg='#404957')

    general_settings = config_manager.get_general_settings()

    # Saturation Factor Setting
    Label(general_settings_window, text="Saturation Factor:").grid(row=0, column=0, sticky='e')
    saturation_var = tk.StringVar(value=general_settings.get('saturation_factor', '1.5'))
    Entry(general_settings_window, textvariable=saturation_var).grid(row=0, column=1)

    # Screen Capture Size Setting — restored: the Save callback below passes
    # capture_size_var, which raised NameError while this block was commented out.
    Label(general_settings_window, text="Screen Capture Size:").grid(row=1, column=0, sticky='e')
    capture_size_var = tk.StringVar(value=general_settings.get('screen_capture_size', '100, 100'))
    Entry(general_settings_window, textvariable=capture_size_var).grid(row=1, column=1)

    # Save Button
    save_button = Button(general_settings_window, text="Save",
                         command=lambda: save_general_settings(saturation_var, capture_size_var))
    save_button.grid(row=2, column=0, columnspan=2)
def create_settings_frame(parent, title, settings, entries_dict):
    """Create a titled LabelFrame holding one label+entry row per setting.

    Args:
        parent: parent tkinter widget.
        title: caption shown on the frame.
        settings: mapping of setting name -> current value.
        entries_dict: out-parameter; filled with setting name -> Entry widget
            so the caller can read edited values back later.

    Returns:
        The packed LabelFrame.
    """
    frame = tk.LabelFrame(parent, text=title, bg='#404957', fg='white', font=("TkDefaultFont", 12, "bold"))
    frame.pack(padx=10, pady=10, fill='x')
    for setting, value in settings.items():
        row = tk.Frame(frame, bg='#404957')
        row.pack(side='top', fill='x', padx=5, pady=5)
        # Display name: underscores become spaces, Title Case, trailing colon.
        label = tk.Label(row, text=setting.replace('_', ' ').title() + ":", bg='#404957', fg='white')
        label.pack(side='left')
        entry = tk.Entry(row, bg='white', fg='black')
        entry.pack(side='right', expand=True, fill='x')
        entry.insert(0, value)
        entries_dict[setting] = entry
    return frame
def open_settings_window(root, coordinator, config_manager , bulb_factory):
# This dictionary will hold the entry widgets for settings
settings_entries = {
'General': {},
'MQTT': {},
'TuyaSettings': {},
'MQTTSettings': {},
'MagicHomeSettings': {}
}
def save_settings():
# Iterate over each settings section and update the configuration
for section, entries in settings_entries.items():
for setting, entry in entries.items():
config_manager.config[section][setting] = entry.get()
# Save the updated configuration to the file
config_manager.save_config()
# Refresh the bulbs and UI if necessary
refresh_bulb_list()
# Provide feedback that settings have been saved
messagebox.showinfo("Settings", "Settings have been saved successfully.")
def refresh_bulb_list():
bulbs_listbox.delete(0, tk.END) # Clear the existing list
bulbs = config_manager.get_bulbs() # Retrieve updated list of bulbs
for bulb in bulbs:
bulbs_listbox.insert(tk.END, f"{bulb['config_id']} - {bulb['device_id']} - {bulb['placement']}")
reinitialize_bulbs()
settings_window = tk.Toplevel(root)
settings_window.title("Settings")
settings_window.geometry("400x700") # Adjust the size as needed
settings_window.configure(bg='#404957')
settings_window.resizable(False, False)
# General settings frame
general_settings_frame = create_settings_frame(settings_window, "General", config_manager.get_general_settings(), settings_entries['General'])
# MQTT settings frame
mqtt_settings_frame = create_settings_frame(settings_window, "MQTT Server", config_manager.get_mqtt_settings(), settings_entries['MQTT'])
# Tuya settings frame
tuya_settings_frame = create_settings_frame(settings_window, "Tuya Specific", config_manager.get_config_by_section("TuyaSettings"), settings_entries['TuyaSettings'])
# MQTT specific settings frame
mqtt_specific_settings_frame = create_settings_frame(settings_window, "MQTT Specific", config_manager.get_config_by_section("MQTTSettings"), settings_entries['MQTTSettings'])
# MagicHome settings frame
magichome_specific_settings_frame = create_settings_frame(settings_window, "MagicHome Specific", config_manager.get_config_by_section("MagicHomeSettings"), settings_entries['MagicHomeSettings'])
# Add "Save Settings" Button
save_button = tk.Button(settings_window, text="Save Settings", command=save_settings, bg='green', fg='white')
save_button.pack(side='bottom', pady=10)
add_new_frame = tk.LabelFrame(settings_window, text="Add New Bulb", bg='#404957', fg='white', font=("TkDefaultFont", 12, "bold"))
add_new_frame.pack(padx=10, pady=10, fill='x')
# Bulbs listbox with a scrollbar
bulbs_frame = tk.LabelFrame(settings_window, text="Bulbs", bg='#404957', fg='white', font=("TkDefaultFont", 12, "bold"))
bulbs_frame.pack(padx=10, pady=10, fill='both', expand=True)
scrollbar = ttk.Scrollbar(bulbs_frame, orient='vertical')
scrollbar.pack(side='right', fill='y')
bulbs_listbox = tk.Listbox(bulbs_frame, yscrollcommand=scrollbar.set, bg='#D9D9D9', fg='black')
bulbs_listbox.pack(side='left', fill='both', expand=True)
scrollbar.config(command=bulbs_listbox.yview)
#add_bulb_window = create_add_bulb_window(root, config_manager, refresh_ui)
# Add New Button
add_new_button = tk.Button(add_new_frame,bg='#D9D9D9',text=' Add '
|
# Application identity used by platformdirs to locate the per-user data dir.
appname = 'ScreenSync_v2'
appauthor = 'Tom George'

# Global flag to track Shooter Mode state
shooter_mode_active = False
def main():
    """Entry point: load configuration, build the bulb stack and run the UI loop."""
    global config_manager, bulb_factory, bulbs, coordinator

    # Check if config directory exists and if not create
    os.makedirs(user_data_dir(appname, appauthor), exist_ok=True)
    #print(user_data_dir(appname, appauthor) + '/config.ini')

    # Initialize necessary objects
    config_manager = ConfigManager(user_data_dir(appname, appauthor) + '/config.ini')
    bulb_factory = BulbFactory(config_manager)
    bulbs = bulb_factory.create_bulbs()
    coordinator = Coordinator(bulbs, color_processing)

    # Bundled asset paths resolved relative to the installed package.
    icon_path = pkg_resources.resource_filename('screensync', 'assets/ScreenSync.ico')
    banner_path = pkg_resources.resource_filename('screensync', 'assets/screensync-banner.png')

    # Define the main window
    root = tk.Tk()
    root.title("ScreenSync V2")
    root.geometry('245x265') # Width x Height
    root.configure(bg='#000000')
    root.resizable(False, False)
    root.overrideredirect(False)
    root.iconbitmap(icon_path)

    # Load and resize the banner image
    banner_image = Image.open(banner_path)
    banner_image = banner_image.resize((200, 55), PIL.Image.Resampling.LANCZOS)
    banner_photo = ImageTk.PhotoImage(banner_image)

    # Create a Label to display the image
    banner_label = tk.Label(root, image=banner_photo, bg='#000000')
    banner_label.image = banner_photo # Keep a reference to avoid garbage collection
    banner_label.place(x=20, y=5) # Place at the top of the window

    # Stats graph frame
    stats_frame = tk.Frame(root, bg='#000000', width=227, height=83)
    stats_frame.place(x=9, y=60)
    update_graph = create_embedded_graph(runtime_stats, stats_frame)
    # NOTE(review): refresh_graph is defined elsewhere in this module; assumed
    # to schedule periodic redraws of the embedded stats graph — confirm.
    refresh_graph(root, update_graph) # Start the periodic update

    # Settings Button
    settings_button = tk.Button(root, bg='#D9D9D9', text='Settings',
                                command=lambda: open_settings_window(root, coordinator, config_manager, bulb_factory))
    settings_button.place(x=11, y=160)

    # Shooter-mode toggle button
    shooter_button = tk.Button(root,bg='#D9D9D9',text='Enable Shooter'
                        ,command=lambda: shooter_clicked(shooter_button, coordinator))
    shooter_button.place(x=133, y=160)

    # Bind the on_closing function to the window's close event
    root.protocol("WM_DELETE_WINDOW", lambda: on_closing(root, coordinator))

    # Start/Stop Button
    start_stop_button = tk.Button(root, text="Start", bg='#D9D9D9', width=31, height=3,
                                  command=lambda: start_stop_button_clicked(start_stop_button, coordinator))
    start_stop_button.place(x=9, y=200)

    root.mainloop()
def toggle_shooter_mode(shooter_button, coordinator):
    """Flip shooter mode and keep the coordinator mode and button label in sync."""
    global shooter_mode_active
    next_state = not shooter_mode_active
    if next_state:
        # Turning shooter mode on.
        coordinator.set_mode('shooter')
        shooter_button.config(text="Disable Shooter")
    else:
        # Turning shooter mode off: fall back to the default mode.
        coordinator.set_mode('normal')
        shooter_button.config(text="Enable Shooter")
    shooter_mode_active = next_state
def on_closing(root, coordinator):
    """Window-close handler: stop the coordinator if running, then tear down the UI."""
    if coordinator.running:
        coordinator.stop()
    root.destroy()
def reinitialize_bulbs():
    """Reload configuration from disk and rebuild the bulb/coordinator stack.

    Called after settings change so the new values take effect.
    """
    global config_manager, bulb_factory, bulbs, coordinator
    # Reload from the same per-user config file the rest of the app uses
    # (previously this read './config.ini', silently ignoring saved settings).
    config_manager = ConfigManager(os.path.join(user_data_dir(appname, appauthor), 'config.ini'))
    # Rebuild the factory so it wraps the freshly loaded ConfigManager
    # (the old factory kept a reference to the stale one).
    bulb_factory = BulbFactory(config_manager)
    bulbs = bulb_factory.create_bulbs()  # Recreate bulbs with new settings
    coordinator = Coordinator(bulbs, color_processing)
def shooter_clicked(shooter_button, coordinator):
    """Click handler for the shooter-mode button: toggle, then log to stdout."""
    toggle_shooter_mode(shooter_button, coordinator)
    print("Toggle shooter mode clicked")
def start_stop_button_clicked(start_stop_button, coordinator):
    """Toggle screen syncing and relabel the button with the next action."""
    was_running = coordinator.running
    if was_running:
        coordinator.stop()
    else:
        coordinator.start()
    start_stop_button.config(text="Start" if was_running else "Stop")
def save_general_settings(saturation_var, capture_size_var):
    """Persist the general settings back to config.ini (not yet implemented).

    Currently only echoes the values that would be written.
    """
    saturation = saturation_var.get()
    capture_size = capture_size_var.get()
    print(f"Saving Saturation: {saturation}, Capture Size: {capture_size}")
def open_general_settings(config_manager):
    """Open a small dialog for editing the general settings.

    Args:
        config_manager: supplies ``get_general_settings()`` with the current
            values; persisting is delegated to ``save_general_settings``.
    """
    # Use the default Tk root as master: 'root' is a local variable of main()
    # and was previously referenced here as an undefined global (NameError).
    general_settings_window = Toplevel()
    general_settings_window.title("General Settings")
    general_settings_window.geometry('300x200')
    general_settings_window.configure(bg='#404957')

    general_settings = config_manager.get_general_settings()

    # Saturation Factor Setting
    Label(general_settings_window, text="Saturation Factor:").grid(row=0, column=0, sticky='e')
    saturation_var = tk.StringVar(value=general_settings.get('saturation_factor', '1.5'))
    Entry(general_settings_window, textvariable=saturation_var).grid(row=0, column=1)

    # Screen Capture Size Setting — restored: the Save callback below passes
    # capture_size_var, which raised NameError while this block was commented out.
    Label(general_settings_window, text="Screen Capture Size:").grid(row=1, column=0, sticky='e')
    capture_size_var = tk.StringVar(value=general_settings.get('screen_capture_size', '100, 100'))
    Entry(general_settings_window, textvariable=capture_size_var).grid(row=1, column=1)

    # Save Button
    save_button = Button(general_settings_window, text="Save",
                         command=lambda: save_general_settings(saturation_var, capture_size_var))
    save_button.grid(row=2, column=0, columnspan=2)
def create_settings_frame(parent, title, settings, entries_dict):
    """Build a titled frame with one label/entry row per setting.

    Each created Entry widget is stored in *entries_dict* keyed by setting
    name so the caller can read edited values back. Returns the frame.
    """
    frame = tk.LabelFrame(parent, text=title, bg='#404957', fg='white', font=("TkDefaultFont", 12, "bold"))
    frame.pack(padx=10, pady=10, fill='x')
    for setting, value in settings.items():
        row = tk.Frame(frame, bg='#404957')
        row.pack(side='top', fill='x', padx=5, pady=5)
        caption = setting.replace('_', ' ').title() + ":"
        tk.Label(row, text=caption, bg='#404957', fg='white').pack(side='left')
        field = tk.Entry(row, bg='white', fg='black')
        field.pack(side='right', expand=True, fill='x')
        field.insert(0, value)
        entries_dict[setting] = field
    return frame
def open_settings_window(root, coordinator, config_manager , bulb_factory):
# This dictionary will hold the entry widgets for settings
settings_entries = {
'General': {},
'MQTT': {},
'TuyaSettings': {},
'MQTTSettings': {},
'MagicHomeSettings': {}
}
def save_settings():
# Iterate over each settings section and update the configuration
for section, entries in settings_entries.items():
for setting, entry in entries.items():
config_manager.config[section][setting] = entry.get()
# Save the updated configuration to the file
config_manager.save_config()
# Refresh the bulbs and UI if necessary
refresh_bulb_list()
# Provide feedback that settings have been saved
messagebox.showinfo("Settings", "Settings have been saved successfully.")
def refresh_bulb_list():
bulbs_listbox.delete(0, tk.END) # Clear the existing list
bulbs = config_manager.get_bulbs() # Retrieve updated list of bulbs
for bulb in bulbs:
bulbs_listbox.insert(tk.END, f"{bulb['config_id']} - {bulb['device_id']} - {bulb['placement']}")
reinitialize_bulbs()
settings_window = tk.Toplevel(root)
settings_window.title("Settings")
settings_window.geometry("400x700") # Adjust the size as needed
settings_window.configure(bg='#404957')
settings_window.resizable(False, False)
# General settings frame
general_settings_frame = create_settings_frame(settings_window, "General", config_manager.get_general_settings(), settings_entries['General'])
# MQTT settings frame
mqtt_settings_frame = create_settings_frame(settings_window, "MQTT Server", config_manager.get_mqtt_settings(), settings_entries['MQTT'])
# Tuya settings frame
tuya_settings_frame = create_settings_frame(settings_window, "Tuya Specific", config_manager.get_config_by_section("TuyaSettings"), settings_entries['TuyaSettings'])
# MQTT specific settings frame
mqtt_specific_settings_frame = create_settings_frame(settings_window, "MQTT Specific", config_manager.get_config_by_section("MQTTSettings"), settings_entries['MQTTSettings'])
# MagicHome settings frame
magichome_specific_settings_frame = create_settings_frame(settings_window, "MagicHome Specific", config_manager.get_config_by_section("MagicHomeSettings"), settings_entries['MagicHomeSettings'])
# Add "Save Settings" Button
save_button = tk.Button(settings_window, text="Save Settings", command=save_settings, bg='green', fg='white')
save_button.pack(side='bottom', pady=10)
add_new_frame = tk.LabelFrame(settings_window, text="Add New Bulb", bg='#404957', fg='white', font=("TkDefaultFont", 12, "bold"))
add_new_frame.pack(padx=10, pady=10, fill='x')
# Bulbs listbox with a scrollbar
bulbs_frame = tk.LabelFrame(settings_window, text="Bulbs", bg='#404957', fg='white', font=("TkDefaultFont", 12, "bold"))
bulbs_frame.pack(padx=10, pady=10, fill='both', expand=True)
scrollbar = ttk.Scrollbar(bulbs_frame, orient='vertical')
scrollbar.pack(side='right', fill='y')
bulbs_listbox = tk.Listbox(bulbs_frame, yscrollcommand=scrollbar.set, bg='#D9D9D9', fg='black')
bulbs_listbox.pack(side='left', fill='both', expand=True)
scrollbar.config(command=bulbs_listbox.yview)
#add_bulb_window = create_add_bulb_window(root, config_manager, refresh_ui)
# Add New Button
add_new_button = tk.Button(add_new_frame,bg='#D9D9D9',text=' Add ' | ,command=lambda: create_add_bulb_window(root, config_manager, refresh_bulb_list)) | 0 | 2023-11-19 10:48:58+00:00 | 12k |
natto-maki/ComfyUI-NegiTools | negi/repos/controlnet_aux/src/controlnet_aux/segment_anything/build_sam.py | [
{
"identifier": "Sam",
"path": "negi/repos/controlnet_aux/src/controlnet_aux/segment_anything/modeling/sam.py",
"snippet": "class Sam(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = \"RGB\"\n\n def __init__(\n self,\n image_encoder: Union[ImageEncoderViT, TinyViT],... | import torch
from functools import partial
from .modeling import ImageEncoderViT, MaskDecoder, PromptEncoder, Sam, TwoWayTransformer, TinyViT | 8,245 | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
def build_sam_vit_h(checkpoint=None):
    """Build a SAM model with the ViT-H (huge) image encoder.

    Args:
        checkpoint: optional path to a checkpoint to load weights from.
    """
    return _build_sam(
        encoder_embed_dim=1280,
        encoder_depth=32,
        encoder_num_heads=16,
        encoder_global_attn_indexes=[7, 15, 23, 31],
        checkpoint=checkpoint,
    )


# ViT-H is the default SAM variant.
build_sam = build_sam_vit_h
def build_sam_vit_l(checkpoint=None):
    """Build a SAM model with the ViT-L (large) image encoder.

    Args:
        checkpoint: optional path to a checkpoint to load weights from.
    """
    return _build_sam(
        encoder_embed_dim=1024,
        encoder_depth=24,
        encoder_num_heads=16,
        encoder_global_attn_indexes=[5, 11, 17, 23],
        checkpoint=checkpoint,
    )
def build_sam_vit_b(checkpoint=None):
    """Build a SAM model with the ViT-B (base) image encoder.

    Args:
        checkpoint: optional path to a checkpoint to load weights from.
    """
    return _build_sam(
        encoder_embed_dim=768,
        encoder_depth=12,
        encoder_num_heads=12,
        encoder_global_attn_indexes=[2, 5, 8, 11],
        checkpoint=checkpoint,
    )
def build_sam_vit_t(checkpoint=None):
prompt_embed_dim = 256
image_size = 1024
vit_patch_size = 16
image_embedding_size = image_size // vit_patch_size
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
def build_sam_vit_h(checkpoint=None):
    """Build a SAM model with the ViT-H (huge) image encoder, optionally
    loading weights from *checkpoint*."""
    vit_h_params = dict(
        encoder_embed_dim=1280,
        encoder_depth=32,
        encoder_num_heads=16,
        encoder_global_attn_indexes=[7, 15, 23, 31],
    )
    return _build_sam(checkpoint=checkpoint, **vit_h_params)


# The default SAM builder is the ViT-H variant.
build_sam = build_sam_vit_h
def build_sam_vit_l(checkpoint=None):
    """Build a SAM model with the ViT-L (large) image encoder, optionally
    loading weights from *checkpoint*."""
    vit_l_params = dict(
        encoder_embed_dim=1024,
        encoder_depth=24,
        encoder_num_heads=16,
        encoder_global_attn_indexes=[5, 11, 17, 23],
    )
    return _build_sam(checkpoint=checkpoint, **vit_l_params)
def build_sam_vit_b(checkpoint=None):
    """Build a SAM model with the ViT-B (base) image encoder, optionally
    loading weights from *checkpoint*."""
    vit_b_params = dict(
        encoder_embed_dim=768,
        encoder_depth=12,
        encoder_num_heads=12,
        encoder_global_attn_indexes=[2, 5, 8, 11],
    )
    return _build_sam(checkpoint=checkpoint, **vit_b_params)
def build_sam_vit_t(checkpoint=None):
prompt_embed_dim = 256
image_size = 1024
vit_patch_size = 16
image_embedding_size = image_size // vit_patch_size | mobile_sam = Sam( | 0 | 2023-11-20 13:09:44+00:00 | 12k |
wangermeng2021/llm-webui | main.py | [
{
"identifier": "login_huggingface",
"path": "src/utils/common.py",
"snippet": "def login_huggingface(token,base_model_name_dropdown):\n if base_model_name_dropdown.lower().find(\"llama\") >= 0:\n if token:\n HUGGINGFACE_HUB_TOKEN = token\n print(\"d1:\",HUGGINGFACE_HUB_T... | import pandas as pd
import math
import numpy as np
import gc
import os,requests
import subprocess,threading
import time
import gradio as gr
import os
import traceback
import numpy as np
import glob
import shutil
import torch
import socket
from src.utils.common import login_huggingface
from src.finetune.huggingface_inference import HuggingfaceInference
from src.finetune.llama_cpp_inference import LlamaCppInference
from src.rag.qa_with_rag import QAWithRAG
from src.utils.common import read_yaml,get_first_row_from_dataset,\
get_runs_model_names_from_dir,get_hg_model_names_from_dir,get_hg_model_names_and_gguf_from_dir,validate_model_path,get_runs_models
from src.utils.chat_prompts import get_model_type,get_chat_history_prompt,get_model_prompt_template
from transformers.training_args import OptimizerNames
from huggingface_hub import hf_hub_download
from src.utils import download_model
from pathlib import Path
from src.finetune.qlora_trainer import QloraTrainer
from src.finetune.qlora_trainer import TRAINING_STATUS
from src.utils.download_huggingface_repo import download_model_wrapper,download_dataset_wrapper | 10,464 |
# Optional HTTP(S) proxy configuration (left disabled).
# os.environ['HTTP_PROXY'] = 'http://127.0.0.1:8889'
# os.environ['HTTPS_PROXY'] = 'http://127.0.0.1:8889'

# Network location of the embedded TensorBoard instance.
LOCAL_HOST_IP = "0.0.0.0"
TENSORBOARD_URL = "http://" + LOCAL_HOST_IP + ":6006/"
INIT_DATASET_NAME = "test_python_code_instructions_5000_rows"

# NOTE(review): the following look like placeholders later rebound to UI
# widget handles by the layout code — confirm before relying on their types.
RAG_DATA_LIST_DROPDOWN = ""
TEXT_SPLITTER_DROPDOWN = ""
CHUNK_SIZE_SLIDER = 0
CHUNK_OVERLAP_SLIDER = -1
SEPARATORS_TEXTBOX = ""
EMBEDDING_MODEL_SOURCE_RADIO = ""
HUB_EMBEDDING_MODEL_NAMES_DROPDOWN = ""
LOCAL_EMBEDDING_MODEL_NAMES_DROPDOWN = ""
CHAT_MODEL_SOURCE_RADIO = ""
HUB_CHAT_MODEL_NAMES_DROPDOWN = ""
LOCAL_CHAT_MODEL_NAMES_DROPDOWN = ""
SEARCH_TOP_K_SLIDER = ""
SEARCH_SCORE_THRESHOLD_SLIDER = ""

# Mutable global state shared across UI callbacks.
training_ret_val = -1
error_msg = ""
current_running_model_name = ""
infer_model = None
stop_generation_status = False
chatbot_history=[]
chatbot_height = 500
rag_chatbot_history=[]
rag_stop_generation_status = False
|
# Optional HTTP(S) proxy configuration (left disabled).
# os.environ['HTTP_PROXY'] = 'http://127.0.0.1:8889'
# os.environ['HTTPS_PROXY'] = 'http://127.0.0.1:8889'

# Network location of the embedded TensorBoard instance.
LOCAL_HOST_IP = "0.0.0.0"
TENSORBOARD_URL = "http://" + LOCAL_HOST_IP + ":6006/"
INIT_DATASET_NAME = "test_python_code_instructions_5000_rows"

# NOTE(review): the following look like placeholders later rebound to UI
# widget handles by the layout code — confirm before relying on their types.
RAG_DATA_LIST_DROPDOWN = ""
TEXT_SPLITTER_DROPDOWN = ""
CHUNK_SIZE_SLIDER = 0
CHUNK_OVERLAP_SLIDER = -1
SEPARATORS_TEXTBOX = ""
EMBEDDING_MODEL_SOURCE_RADIO = ""
HUB_EMBEDDING_MODEL_NAMES_DROPDOWN = ""
LOCAL_EMBEDDING_MODEL_NAMES_DROPDOWN = ""
CHAT_MODEL_SOURCE_RADIO = ""
HUB_CHAT_MODEL_NAMES_DROPDOWN = ""
LOCAL_CHAT_MODEL_NAMES_DROPDOWN = ""
SEARCH_TOP_K_SLIDER = ""
SEARCH_SCORE_THRESHOLD_SLIDER = ""

# Mutable global state shared across UI callbacks.
training_ret_val = -1
error_msg = ""
current_running_model_name = ""
infer_model = None
stop_generation_status = False
chatbot_history=[]
chatbot_height = 500
rag_chatbot_history=[]
rag_stop_generation_status = False
basf/ARCANA | arcana/procedures/training.py | [
{
"identifier": "logger",
"path": "arcana/logger/logger.py",
"snippet": "APP_LOGGER_NAME = 'ARCANA'\ndef setup_applevel_logger(logger_name = APP_LOGGER_NAME, file_name=None):\ndef get_logger(module_name):"
},
{
"identifier": "train_model",
"path": "arcana/training/train_model.py",
"snipp... | import os
import warnings
import json
import pickle
import numpy as np
import torch
from arcana.logger import logger
from arcana.training import train_model
from arcana.losses.loss import LossFactory
from arcana.regularizations.optimizer_scheduler import SchedulerFactory
from arcana.models.sequence_to_sequence.seq2seq_factory import Seq2SeqFactory
from arcana.procedures.config_handler import ConfigHandler
from arcana.processing.data_processing import DataPreparation
from arcana.utils import utils | 7,439 | self.data_preparation = DataPreparation(self.general_config, self.data_config, self.procedure_config)
# initializing the model class
self.device = None
self.set_device()
# initializing the loss class
self.criterion = None
# initializing the optimizer class
self.optimizer = None
self.scheduler = None
# initializing the model class
self.seq2seq_factory = Seq2SeqFactory(self.model_config)
self.seq_2_seq_trainer = None
#self.model = None
self.train_parameters = None
# initializing the loaders
self.train_loader = None
self.val_loader = None
# get the data splits
if self.general_config.pretrained_model:
if self.procedure_config.transfer_learning:
self.data_splits()
if self.procedure_config.predicting and (not self.procedure_config.transfer_learning):
pass
else:
self.data_splits()
# if ((not self.procedure_config.naive_training) and (not self.procedure_config.transfer_learning) and \
# (not self.procedure_config.optuna_tuning) and (self.procedure_config.predicting)):
# self.data_splits()
    def set_device(self):
        """Select CUDA when available (else CPU) and set the matching default
        tensor type so newly created tensors land on that device.
        """
        # move to GPU if available
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        if self.device.type == "cpu":
            torch.set_default_tensor_type("torch.FloatTensor")
        if not self.device.type == "cpu":
            torch.set_default_tensor_type("torch.cuda.FloatTensor")
        log.info(f"device: {self.device}")
    def data_splits(self):
        """Load the raw data, run the model-specific preparation and persist
        the resulting train/val/test splits to disk.
        """
        self.data_preparation.get_data_for_model()
        self.data_preparation.prepare_data_for_model()
        self.save_data_splits()
    def save_data_splits(self):
        """Persist the original CSV splits, the fitted data transformation,
        the test sample names and the padded tensor splits under
        ``<result_path>/data_splits``.
        """
        data_path = os. path.join(self.model_config.result_path, "data_splits")
        # save the original data splits
        for original_data, data_name in zip([self.data_preparation.df_train_original, self.data_preparation.df_val_original,
                                             self.data_preparation.df_test_original], ["train", "val", "test"]):
            original_data.to_csv(os.path.join(data_path, f"{data_name}_original.csv"))
        if self.procedure_config.preprocess_data:
            # save model data transformation (only exists when preprocessing ran)
            with open(os.path.join(data_path, "model_data_transformation.pkl"), "wb") as f:
                pickle.dump(self.data_preparation.model_data_transformation, f)
        # save the test_names of the test data
        np.save(os.path.join(data_path, "test_names.npy"), self.data_preparation.test_data_names)
        # save the processed data splits
        for processed_data, processed_name in zip([self.data_preparation.padded_train_data,
                                                   self.data_preparation.padded_val_data, self.data_preparation.padded_test_data],
                                                  ["train", "val", "test"]):
            torch.save(processed_data, os.path.join(data_path, f"{processed_name}_processed.pt"))
    def loader_initialization(self):
        """Wrap the padded train/val tensors in DataLoaders with the configured batch size."""
        # define the data loaders
        self.train_loader = torch.utils.data.DataLoader(self.data_preparation.padded_train_data,
                                                        batch_size=self.model_config.batch_size)
        self.val_loader = torch.utils.data.DataLoader(self.data_preparation.padded_val_data,
                                                      batch_size=self.model_config.batch_size)
    def model_parameter_initialization(self):
        """Instantiate the seq2seq model for the configured attention type
        ('additive' or 'multihead') and wrap it in DataParallel when several
        GPUs are available.
        """
        # define the data loaders
        # self.train_loader = torch.utils.data.DataLoader(self.data_preparation.padded_train_data,
        #                                                 batch_size=self.model_config.batch_size)
        # self.val_loader = torch.utils.data.DataLoader(self.data_preparation.padded_val_data,
        #                                               batch_size=self.model_config.batch_size)
        # define the model
        if self.procedure_config.attention_type == "additive":
            self.seq2seq_factory.create_additive_model()
        elif self.procedure_config.attention_type == "multihead":
            self.seq2seq_factory.create_multihead_model()
        # parallelize the model if more than one GPU is available
        if torch.cuda.device_count() > 1:
            log.info(f"Using {torch.cuda.device_count()} GPUs")
            self.seq2seq_factory.seq2seq = torch.nn.DataParallel(self.seq2seq_factory.seq2seq)
def train_element_initialization(self):
"""Initialize the training elements
"""
# define the loss
self.criterion = LossFactory.create_loss(self.model_config)
# define optimizer
optimizer = torch.optim.Adam(self.seq2seq_factory.seq2seq.parameters(),
lr=self.model_config.learning_rate,
weight_decay=self.model_config.weight_decay)
# Instantiate the factory with the optimizer and params
scheduler_factory = SchedulerFactory(optimizer, self.model_config, len_train_loader=len(self.train_loader))
# Get the desired scheduler
scheduler = scheduler_factory.get_scheduler(learning_rate_type = self.procedure_config.learning_rate_type)
# define the trainer
| ''' This module is the main module for training the model. It contains the TrainProcedure class which is the main class'''
# from arcana.plots import plots
# NOTE(review): suppresses *all* warnings process-wide — consider narrowing.
warnings.filterwarnings("ignore")
# plots.Plots()

# Global reproducibility setup: fix every RNG seed and make cuDNN deterministic.
np.random.seed(0)
log = logger.get_logger("arcana.run_procedure")
SEED = 0
torch.cuda.manual_seed(SEED)
torch.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
class TrainProcedure:
"""This class is the main class for training the model. It contains some of the necessary functions for training,
predicting and finetuning the model. The class also contains all the parameters for the training, predicting and
tuning of the model. It also contains the functions for saving the model parameters and the
data splits."""
    def __init__(self):
        """Load all configuration sections, build the processing/model helpers
        and — depending on the configured procedure — prepare the data splits.
        """
        config_handler = ConfigHandler()
        self.general_config = config_handler.get_general_config()
        self.data_config = config_handler.get_data_config()
        self.procedure_config = config_handler.get_procedure_config()
        self.model_config = config_handler.get_model_config()
        self.model_config.dim_weights = torch.tensor(self.model_config.dim_weights)
        # initializing the data preparation class
        self.data_preparation = DataPreparation(self.general_config, self.data_config, self.procedure_config)
        # initializing the model class
        self.device = None
        self.set_device()
        # initializing the loss class
        self.criterion = None
        # initializing the optimizer class
        self.optimizer = None
        self.scheduler = None
        # initializing the model class
        self.seq2seq_factory = Seq2SeqFactory(self.model_config)
        self.seq_2_seq_trainer = None
        #self.model = None
        self.train_parameters = None
        # initializing the loaders
        self.train_loader = None
        self.val_loader = None
        # get the data splits
        # NOTE(review): with a pretrained model, prediction-only runs skip the
        # split, everything else re-splits; when transfer_learning is enabled
        # data_splits() appears to run twice (here and in the else) — confirm.
        if self.general_config.pretrained_model:
            if self.procedure_config.transfer_learning:
                self.data_splits()
            if self.procedure_config.predicting and (not self.procedure_config.transfer_learning):
                pass
            else:
                self.data_splits()
        # if ((not self.procedure_config.naive_training) and (not self.procedure_config.transfer_learning) and \
        #     (not self.procedure_config.optuna_tuning) and (self.procedure_config.predicting)):
        #     self.data_splits()
def set_device(self):
"""Set the device for training the model
"""
# move to GPU if available
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if self.device.type == "cpu":
torch.set_default_tensor_type("torch.FloatTensor")
if not self.device.type == "cpu":
torch.set_default_tensor_type("torch.cuda.FloatTensor")
log.info(f"device: {self.device}")
    def data_splits(self):
        """Load the raw data, run the model-specific preparation and persist
        the resulting train/val/test splits to disk.
        """
        self.data_preparation.get_data_for_model()
        self.data_preparation.prepare_data_for_model()
        self.save_data_splits()
    def save_data_splits(self):
        """Persist the original CSV splits, the fitted data transformation,
        the test sample names and the padded tensor splits under
        ``<result_path>/data_splits``.
        """
        data_path = os. path.join(self.model_config.result_path, "data_splits")
        # save the original data splits
        for original_data, data_name in zip([self.data_preparation.df_train_original, self.data_preparation.df_val_original,
                                             self.data_preparation.df_test_original], ["train", "val", "test"]):
            original_data.to_csv(os.path.join(data_path, f"{data_name}_original.csv"))
        if self.procedure_config.preprocess_data:
            # save model data transformation (only exists when preprocessing ran)
            with open(os.path.join(data_path, "model_data_transformation.pkl"), "wb") as f:
                pickle.dump(self.data_preparation.model_data_transformation, f)
        # save the test_names of the test data
        np.save(os.path.join(data_path, "test_names.npy"), self.data_preparation.test_data_names)
        # save the processed data splits
        for processed_data, processed_name in zip([self.data_preparation.padded_train_data,
                                                   self.data_preparation.padded_val_data, self.data_preparation.padded_test_data],
                                                  ["train", "val", "test"]):
            torch.save(processed_data, os.path.join(data_path, f"{processed_name}_processed.pt"))
def loader_initialization(self):
"""Initialize the data loaders
"""
# define the data loaders
self.train_loader = torch.utils.data.DataLoader(self.data_preparation.padded_train_data,
batch_size=self.model_config.batch_size)
self.val_loader = torch.utils.data.DataLoader(self.data_preparation.padded_val_data,
batch_size=self.model_config.batch_size)
def model_parameter_initialization(self):
"""Initialize the model parameters
"""
# define the data loaders
# self.train_loader = torch.utils.data.DataLoader(self.data_preparation.padded_train_data,
# batch_size=self.model_config.batch_size)
# self.val_loader = torch.utils.data.DataLoader(self.data_preparation.padded_val_data,
# batch_size=self.model_config.batch_size)
# define the model
if self.procedure_config.attention_type == "additive":
self.seq2seq_factory.create_additive_model()
elif self.procedure_config.attention_type == "multihead":
self.seq2seq_factory.create_multihead_model()
# parallelize the model if more than one GPU is available
if torch.cuda.device_count() > 1:
log.info(f"Using {torch.cuda.device_count()} GPUs")
self.seq2seq_factory.seq2seq = torch.nn.DataParallel(self.seq2seq_factory.seq2seq)
def train_element_initialization(self):
"""Initialize the training elements
"""
# define the loss
self.criterion = LossFactory.create_loss(self.model_config)
# define optimizer
optimizer = torch.optim.Adam(self.seq2seq_factory.seq2seq.parameters(),
lr=self.model_config.learning_rate,
weight_decay=self.model_config.weight_decay)
# Instantiate the factory with the optimizer and params
scheduler_factory = SchedulerFactory(optimizer, self.model_config, len_train_loader=len(self.train_loader))
# Get the desired scheduler
scheduler = scheduler_factory.get_scheduler(learning_rate_type = self.procedure_config.learning_rate_type)
# define the trainer | self.seq_2_seq_trainer = train_model.Seq2SeqTrainer(self.seq2seq_factory.seq2seq, self.criterion, optimizer, self.device, | 1 | 2023-11-21 12:51:37+00:00 | 12k |
JustRin/Stable-Video-Diffusion | scripts/demo/streamlit_helpers.py | [
{
"identifier": "Img2ImgDiscretizationWrapper",
"path": "scripts/demo/discretization.py",
"snippet": "class Img2ImgDiscretizationWrapper:\n \"\"\"\n wraps a discretizer, and prunes the sigmas\n params:\n strength: float between 0.0 and 1.0. 1.0 means full sampling (all sigmas are returne... | import copy
import math
import os
import cv2
import numpy as np
import streamlit as st
import torch
import torch.nn as nn
import torchvision.transforms as TT
from glob import glob
from typing import Dict, List, Optional, Tuple, Union
from einops import rearrange, repeat
from imwatermark import WatermarkEncoder
from omegaconf import ListConfig, OmegaConf
from PIL import Image
from safetensors.torch import load_file as load_safetensors
from torch import autocast
from torchvision import transforms
from torchvision.utils import make_grid, save_image
from scripts.demo.discretization import (Img2ImgDiscretizationWrapper,
Txt2NoisyDiscretizationWrapper)
from scripts.util.detection.nsfw_and_watermark_dectection import \
DeepFloydDataFiltering
from sgm.inference.helpers import embed_watermark
from sgm.modules.diffusionmodules.guiders import (LinearPredictionGuider,
VanillaCFG)
from sgm.modules.diffusionmodules.sampling import (DPMPP2MSampler,
DPMPP2SAncestralSampler,
EulerAncestralSampler,
EulerEDMSampler,
HeunEDMSampler,
LinearMultistepSampler)
from sgm.util import append_dims, default, instantiate_from_config | 7,216 | guider_config=guider_config,
order=order,
verbose=True,
)
else:
raise ValueError(f"unknown sampler {sampler_name}!")
return sampler
def get_interactive_image() -> Image.Image:
image = st.file_uploader("Input", type=["jpg", "JPEG", "png"])
if image is not None:
image = Image.open(image)
if not image.mode == "RGB":
image = image.convert("RGB")
return image
def load_img(
display: bool = True,
size: Union[None, int, Tuple[int, int]] = None,
center_crop: bool = False,
):
image = get_interactive_image()
if image is None:
return None
if display:
st.image(image)
w, h = image.size
print(f"loaded input image of size ({w}, {h})")
transform = []
if size is not None:
transform.append(transforms.Resize(size))
if center_crop:
transform.append(transforms.CenterCrop(size))
transform.append(transforms.ToTensor())
transform.append(transforms.Lambda(lambda x: 2.0 * x - 1.0))
transform = transforms.Compose(transform)
img = transform(image)[None, ...]
st.text(f"input min/max/mean: {img.min():.3f}/{img.max():.3f}/{img.mean():.3f}")
return img
def get_init_img(batch_size=1, key=None):
init_image = load_img(key=key).cuda()
init_image = repeat(init_image, "1 ... -> b ...", b=batch_size)
return init_image
def do_sample(
model,
sampler,
value_dict,
num_samples,
H,
W,
C,
F,
force_uc_zero_embeddings: Optional[List] = None,
force_cond_zero_embeddings: Optional[List] = None,
batch2model_input: List = None,
return_latents=False,
filter=None,
T=None,
additional_batch_uc_fields=None,
decoding_t=None,
):
force_uc_zero_embeddings = default(force_uc_zero_embeddings, [])
batch2model_input = default(batch2model_input, [])
additional_batch_uc_fields = default(additional_batch_uc_fields, [])
st.text("Sampling")
outputs = st.empty()
precision_scope = autocast
with torch.no_grad():
with precision_scope("cuda"):
with model.ema_scope():
if T is not None:
num_samples = [num_samples, T]
else:
num_samples = [num_samples]
load_model(model.conditioner)
batch, batch_uc = get_batch(
get_unique_embedder_keys_from_conditioner(model.conditioner),
value_dict,
num_samples,
T=T,
additional_batch_uc_fields=additional_batch_uc_fields,
)
c, uc = model.conditioner.get_unconditional_conditioning(
batch,
batch_uc=batch_uc,
force_uc_zero_embeddings=force_uc_zero_embeddings,
force_cond_zero_embeddings=force_cond_zero_embeddings,
)
unload_model(model.conditioner)
for k in c:
if not k == "crossattn":
c[k], uc[k] = map(
lambda y: y[k][: math.prod(num_samples)].to("cuda"), (c, uc)
)
if k in ["crossattn", "concat"] and T is not None:
uc[k] = repeat(uc[k], "b ... -> b t ...", t=T)
uc[k] = rearrange(uc[k], "b t ... -> (b t) ...", t=T)
c[k] = repeat(c[k], "b ... -> b t ...", t=T)
c[k] = rearrange(c[k], "b t ... -> (b t) ...", t=T)
additional_model_inputs = {}
for k in batch2model_input:
if k == "image_only_indicator":
assert T is not None
if isinstance(
|
@st.cache_resource()
def init_st(version_dict, load_ckpt=True, load_filter=True):
state = dict()
if not "model" in state:
config = version_dict["config"]
ckpt = version_dict["ckpt"]
config = OmegaConf.load(config)
model, msg = load_model_from_config(config, ckpt if load_ckpt else None)
state["msg"] = msg
state["model"] = model
state["ckpt"] = ckpt if load_ckpt else None
state["config"] = config
if load_filter:
state["filter"] = DeepFloydDataFiltering(verbose=False)
return state
def load_model(model):
model.cuda()
lowvram_mode = False
def set_lowvram_mode(mode):
global lowvram_mode
lowvram_mode = mode
def initial_model_load(model):
global lowvram_mode
if lowvram_mode:
model.model.half()
else:
model.cuda()
return model
def unload_model(model):
global lowvram_mode
if lowvram_mode:
model.cpu()
torch.cuda.empty_cache()
def load_model_from_config(config, ckpt=None, verbose=True):
model = instantiate_from_config(config.model)
if ckpt is not None:
print(f"Loading model from {ckpt}")
if ckpt.endswith("ckpt"):
pl_sd = torch.load(ckpt, map_location="cpu")
if "global_step" in pl_sd:
global_step = pl_sd["global_step"]
st.info(f"loaded ckpt from global step {global_step}")
print(f"Global Step: {pl_sd['global_step']}")
sd = pl_sd["state_dict"]
elif ckpt.endswith("safetensors"):
sd = load_safetensors(ckpt)
else:
raise NotImplementedError
msg = None
m, u = model.load_state_dict(sd, strict=False)
if len(m) > 0 and verbose:
print("missing keys:")
print(m)
if len(u) > 0 and verbose:
print("unexpected keys:")
print(u)
else:
msg = None
model = initial_model_load(model)
model.eval()
return model, msg
def get_unique_embedder_keys_from_conditioner(conditioner):
return list(set([x.input_key for x in conditioner.embedders]))
def init_embedder_options(keys, init_dict, prompt=None, negative_prompt=None):
# Hardcoded demo settings; might undergo some changes in the future
value_dict = {}
for key in keys:
if key == "txt":
if prompt is None:
prompt = "A professional photograph of an astronaut riding a pig"
if negative_prompt is None:
negative_prompt = ""
prompt = st.text_input("Prompt", prompt)
negative_prompt = st.text_input("Negative prompt", negative_prompt)
value_dict["prompt"] = prompt
value_dict["negative_prompt"] = negative_prompt
if key == "original_size_as_tuple":
orig_width = st.number_input(
"orig_width",
value=init_dict["orig_width"],
min_value=16,
)
orig_height = st.number_input(
"orig_height",
value=init_dict["orig_height"],
min_value=16,
)
value_dict["orig_width"] = orig_width
value_dict["orig_height"] = orig_height
if key == "crop_coords_top_left":
crop_coord_top = st.number_input("crop_coords_top", value=0, min_value=0)
crop_coord_left = st.number_input("crop_coords_left", value=0, min_value=0)
value_dict["crop_coords_top"] = crop_coord_top
value_dict["crop_coords_left"] = crop_coord_left
if key == "aesthetic_score":
value_dict["aesthetic_score"] = 6.0
value_dict["negative_aesthetic_score"] = 2.5
if key == "target_size_as_tuple":
value_dict["target_width"] = init_dict["target_width"]
value_dict["target_height"] = init_dict["target_height"]
if key in ["fps_id", "fps"]:
fps = st.number_input("fps", value=6, min_value=1)
value_dict["fps"] = fps
value_dict["fps_id"] = fps - 1
if key == "motion_bucket_id":
mb_id = st.number_input("motion bucket id", 0, 511, value=127)
value_dict["motion_bucket_id"] = mb_id
if key == "pool_image":
st.text("Image for pool conditioning")
image = load_img(
key="pool_image_input",
size=224,
center_crop=True,
)
if image is None:
st.info("Need an image here")
image = torch.zeros(1, 3, 224, 224)
value_dict["pool_image"] = image
return value_dict
def perform_save_locally(save_path, samples):
os.makedirs(os.path.join(save_path), exist_ok=True)
base_count = len(os.listdir(os.path.join(save_path)))
samples = embed_watermark(samples)
for sample in samples:
sample = 255.0 * rearrange(sample.cpu().numpy(), "c h w -> h w c")
Image.fromarray(sample.astype(np.uint8)).save(
os.path.join(save_path, f"{base_count:09}.png")
)
base_count += 1
def init_save_locally(_dir, init_value: bool = False):
save_locally = st.sidebar.checkbox("Save images locally", value=init_value)
if save_locally:
save_path = st.text_input("Save path", value=os.path.join(_dir, "samples"))
else:
save_path = None
return save_locally, save_path
def get_guider(options, key):
guider = st.sidebar.selectbox(
f"Discretization #{key}",
[
"VanillaCFG",
"IdentityGuider",
"LinearPredictionGuider",
],
options.get("guider", 0),
)
additional_guider_kwargs = options.pop("additional_guider_kwargs", {})
if guider == "IdentityGuider":
guider_config = {
"target": "sgm.modules.diffusionmodules.guiders.IdentityGuider"
}
elif guider == "VanillaCFG":
scale = st.number_input(
f"cfg-scale #{key}",
value=options.get("cfg", 5.0),
min_value=0.0,
)
guider_config = {
"target": "sgm.modules.diffusionmodules.guiders.VanillaCFG",
"params": {
"scale": scale,
**additional_guider_kwargs,
},
}
elif guider == "LinearPredictionGuider":
max_scale = st.number_input(
f"max-cfg-scale #{key}",
value=options.get("cfg", 1.5),
min_value=1.0,
)
min_scale = st.number_input(
f"min guidance scale",
value=options.get("min_cfg", 1.0),
min_value=1.0,
max_value=10.0,
)
guider_config = {
"target": "sgm.modules.diffusionmodules.guiders.LinearPredictionGuider",
"params": {
"max_scale": max_scale,
"min_scale": min_scale,
"num_frames": options["num_frames"],
**additional_guider_kwargs,
},
}
else:
raise NotImplementedError
return guider_config
def init_sampling(
key=1,
img2img_strength: Optional[float] = None,
specify_num_samples: bool = True,
stage2strength: Optional[float] = None,
options: Optional[Dict[str, int]] = None,
):
options = {} if options is None else options
num_rows, num_cols = 1, 1
if specify_num_samples:
num_cols = st.number_input(
f"num cols #{key}", value=num_cols, min_value=1, max_value=10
)
steps = st.sidebar.number_input(
f"steps #{key}", value=options.get("num_steps", 40), min_value=1, max_value=1000
)
sampler = st.sidebar.selectbox(
f"Sampler #{key}",
[
"EulerEDMSampler",
"HeunEDMSampler",
"EulerAncestralSampler",
"DPMPP2SAncestralSampler",
"DPMPP2MSampler",
"LinearMultistepSampler",
],
options.get("sampler", 0),
)
discretization = st.sidebar.selectbox(
f"Discretization #{key}",
[
"LegacyDDPMDiscretization",
"EDMDiscretization",
],
options.get("discretization", 0),
)
discretization_config = get_discretization(discretization, options=options, key=key)
guider_config = get_guider(options=options, key=key)
sampler = get_sampler(sampler, steps, discretization_config, guider_config, key=key)
if img2img_strength is not None:
st.warning(
f"Wrapping {sampler.__class__.__name__} with Img2ImgDiscretizationWrapper"
)
sampler.discretization = Img2ImgDiscretizationWrapper(
sampler.discretization, strength=img2img_strength
)
if stage2strength is not None:
sampler.discretization = Txt2NoisyDiscretizationWrapper(
sampler.discretization, strength=stage2strength, original_steps=steps
)
return sampler, num_rows, num_cols
def get_discretization(discretization, options, key=1):
if discretization == "LegacyDDPMDiscretization":
discretization_config = {
"target": "sgm.modules.diffusionmodules.discretizer.LegacyDDPMDiscretization",
}
elif discretization == "EDMDiscretization":
sigma_min = st.number_input(
f"sigma_min #{key}", value=options.get("sigma_min", 0.03)
) # 0.0292
sigma_max = st.number_input(
f"sigma_max #{key}", value=options.get("sigma_max", 14.61)
) # 14.6146
rho = st.number_input(f"rho #{key}", value=options.get("rho", 3.0))
discretization_config = {
"target": "sgm.modules.diffusionmodules.discretizer.EDMDiscretization",
"params": {
"sigma_min": sigma_min,
"sigma_max": sigma_max,
"rho": rho,
},
}
return discretization_config
def get_sampler(sampler_name, steps, discretization_config, guider_config, key=1):
if sampler_name == "EulerEDMSampler" or sampler_name == "HeunEDMSampler":
s_churn = st.sidebar.number_input(f"s_churn #{key}", value=0.0, min_value=0.0)
s_tmin = st.sidebar.number_input(f"s_tmin #{key}", value=0.0, min_value=0.0)
s_tmax = st.sidebar.number_input(f"s_tmax #{key}", value=999.0, min_value=0.0)
s_noise = st.sidebar.number_input(f"s_noise #{key}", value=1.0, min_value=0.0)
if sampler_name == "EulerEDMSampler":
sampler = EulerEDMSampler(
num_steps=steps,
discretization_config=discretization_config,
guider_config=guider_config,
s_churn=s_churn,
s_tmin=s_tmin,
s_tmax=s_tmax,
s_noise=s_noise,
verbose=True,
)
elif sampler_name == "HeunEDMSampler":
sampler = HeunEDMSampler(
num_steps=steps,
discretization_config=discretization_config,
guider_config=guider_config,
s_churn=s_churn,
s_tmin=s_tmin,
s_tmax=s_tmax,
s_noise=s_noise,
verbose=True,
)
elif (
sampler_name == "EulerAncestralSampler"
or sampler_name == "DPMPP2SAncestralSampler"
):
s_noise = st.sidebar.number_input("s_noise", value=1.0, min_value=0.0)
eta = st.sidebar.number_input("eta", value=1.0, min_value=0.0)
if sampler_name == "EulerAncestralSampler":
sampler = EulerAncestralSampler(
num_steps=steps,
discretization_config=discretization_config,
guider_config=guider_config,
eta=eta,
s_noise=s_noise,
verbose=True,
)
elif sampler_name == "DPMPP2SAncestralSampler":
sampler = DPMPP2SAncestralSampler(
num_steps=steps,
discretization_config=discretization_config,
guider_config=guider_config,
eta=eta,
s_noise=s_noise,
verbose=True,
)
elif sampler_name == "DPMPP2MSampler":
sampler = DPMPP2MSampler(
num_steps=steps,
discretization_config=discretization_config,
guider_config=guider_config,
verbose=True,
)
elif sampler_name == "LinearMultistepSampler":
order = st.sidebar.number_input("order", value=4, min_value=1)
sampler = LinearMultistepSampler(
num_steps=steps,
discretization_config=discretization_config,
guider_config=guider_config,
order=order,
verbose=True,
)
else:
raise ValueError(f"unknown sampler {sampler_name}!")
return sampler
def get_interactive_image() -> Image.Image:
image = st.file_uploader("Input", type=["jpg", "JPEG", "png"])
if image is not None:
image = Image.open(image)
if not image.mode == "RGB":
image = image.convert("RGB")
return image
def load_img(
display: bool = True,
size: Union[None, int, Tuple[int, int]] = None,
center_crop: bool = False,
):
image = get_interactive_image()
if image is None:
return None
if display:
st.image(image)
w, h = image.size
print(f"loaded input image of size ({w}, {h})")
transform = []
if size is not None:
transform.append(transforms.Resize(size))
if center_crop:
transform.append(transforms.CenterCrop(size))
transform.append(transforms.ToTensor())
transform.append(transforms.Lambda(lambda x: 2.0 * x - 1.0))
transform = transforms.Compose(transform)
img = transform(image)[None, ...]
st.text(f"input min/max/mean: {img.min():.3f}/{img.max():.3f}/{img.mean():.3f}")
return img
def get_init_img(batch_size=1, key=None):
init_image = load_img(key=key).cuda()
init_image = repeat(init_image, "1 ... -> b ...", b=batch_size)
return init_image
def do_sample(
model,
sampler,
value_dict,
num_samples,
H,
W,
C,
F,
force_uc_zero_embeddings: Optional[List] = None,
force_cond_zero_embeddings: Optional[List] = None,
batch2model_input: List = None,
return_latents=False,
filter=None,
T=None,
additional_batch_uc_fields=None,
decoding_t=None,
):
force_uc_zero_embeddings = default(force_uc_zero_embeddings, [])
batch2model_input = default(batch2model_input, [])
additional_batch_uc_fields = default(additional_batch_uc_fields, [])
st.text("Sampling")
outputs = st.empty()
precision_scope = autocast
with torch.no_grad():
with precision_scope("cuda"):
with model.ema_scope():
if T is not None:
num_samples = [num_samples, T]
else:
num_samples = [num_samples]
load_model(model.conditioner)
batch, batch_uc = get_batch(
get_unique_embedder_keys_from_conditioner(model.conditioner),
value_dict,
num_samples,
T=T,
additional_batch_uc_fields=additional_batch_uc_fields,
)
c, uc = model.conditioner.get_unconditional_conditioning(
batch,
batch_uc=batch_uc,
force_uc_zero_embeddings=force_uc_zero_embeddings,
force_cond_zero_embeddings=force_cond_zero_embeddings,
)
unload_model(model.conditioner)
for k in c:
if not k == "crossattn":
c[k], uc[k] = map(
lambda y: y[k][: math.prod(num_samples)].to("cuda"), (c, uc)
)
if k in ["crossattn", "concat"] and T is not None:
uc[k] = repeat(uc[k], "b ... -> b t ...", t=T)
uc[k] = rearrange(uc[k], "b t ... -> (b t) ...", t=T)
c[k] = repeat(c[k], "b ... -> b t ...", t=T)
c[k] = rearrange(c[k], "b t ... -> (b t) ...", t=T)
additional_model_inputs = {}
for k in batch2model_input:
if k == "image_only_indicator":
assert T is not None
if isinstance( | sampler.guider, (VanillaCFG, LinearPredictionGuider) | 5 | 2023-11-23 10:57:27+00:00 | 12k |
danilonumeroso/conar | train_reasoner.py | [
{
"identifier": "_PROCESSSOR_DICT",
"path": "models/gnns.py",
"snippet": "_PROCESSSOR_DICT = {\n 'MPNN': MPNN,\n 'GATv2': GATv2,\n 'TriMPNN': TripletMPNN,\n 'GPS': GPS,\n}"
},
{
"identifier": "LitAlgorithmReasoner",
"path": "models/algorithm_reasoner.py",
"snippet": "class Li... | import os
import schema
import torch
import pytorch_lightning as pl
from datetime import datetime
from collections import defaultdict
from docopt import docopt
from models.gnns import _PROCESSSOR_DICT
from models.algorithm_reasoner import LitAlgorithmReasoner
from models.algorithm_processor import LitAlgorithmProcessor
from hyperparameters import get_hyperparameters
from utils_execution import ReasonerZeroerCallback, get_callbacks, maybe_remove | 7,390 | """
Script to train the reasoner model.
Usage:
train_reasoner.py [options]
Options:
-h --help Show this screen.
--patience P Patience value. If present, the training will utilise
early stopping based on validation loss.
--max-epochs ME The maximum epochs to train for. If patience value is not
provided it will always train for ME epochs. [default: 1000]
--model-name MN Name of the model when saving. Defaults to current time
and date if not provided.
--processors PS Which processors to use. String of comma separated values.
[default: MPNN]
--RPHWM Whether to Reduce Processor set Hiddens With MLP?
--gradient-clip-val G Constant for gradient clipping. 0 means no clipping.
[default: 1]
--xavier-on-scalars Use Xavier initialisation for linears that encode scalars.
--biased-gate Bias the gating mechanism towards less updating
--update-edges-hidden Whether to also keep a track of hidden edge state.
--use-LSTM Add an LSTMCell just after the processor step
(in case of several processors, each has its own LSTM)
--use-ln Use Layer Norm in the processor.
--algorithms ALGOS List of algorithms to train on. Repeatable. [default: mst_prim]
--sampler-type (default|geometric) What sampler was used for graph generation. [default: default]
--seed S Random seed to set. [default: 47]
"""
if __name__ == '__main__':
hidden_dim = get_hyperparameters()['dim_latent']
serialised_models_dir = os.path.abspath('./serialised_models/')
schema = schema.Schema({
'--help': bool,
'--xavier-on-scalars': bool,
'--biased-gate': bool,
'--update-edges-hidden': bool,
'--use-LSTM': bool,
'--use-ln': bool,
'--patience': schema.Or(None, schema.Use(int)),
'--max-epochs': schema.Or(None, schema.Use(int)),
'--model-name': schema.Or(None, schema.Use(str)),
'--processors': schema.And(schema.Use(lambda x: x.split(',')), lambda lst: all(x in _PROCESSSOR_DICT for x in lst)),
'--RPHWM': bool,
'--gradient-clip-val': schema.Use(int),
'--algorithms': schema.Use(lambda x: x.split(',')),
'--sampler-type': str,
'--seed': schema.Use(int),
})
args = docopt(__doc__)
args = schema.validate(args)
name = args['--model-name'] if args['--model-name'] is not None else datetime.now().strftime('%b-%d-%Y-%H-%M')
pl.utilities.seed.seed_everything(args['--seed'])
lit_processor = LitAlgorithmProcessor(
hidden_dim,
args['--algorithms'],
dict((algo, {'sampler_type': args['--sampler-type']}) for algo in args['--algorithms']),
dict((algo, LitAlgorithmReasoner) for algo in args['--algorithms']),
False, #args['--ensure-permutation'] is False for non-TSP
reduce_proc_hid_w_MLP=args['--RPHWM'],
update_edges_hidden=args['--update-edges-hidden'],
use_TF=False,
use_gate=True,
use_LSTM=args['--use-LSTM'],
use_ln=args['--use-ln'],
freeze_proc=False, # We don't have a transfer task
processors=args['--processors'],
xavier_on_scalars=args['--xavier-on-scalars'],
biased_gate=args['--biased-gate'],
)
| """
Script to train the reasoner model.
Usage:
train_reasoner.py [options]
Options:
-h --help Show this screen.
--patience P Patience value. If present, the training will utilise
early stopping based on validation loss.
--max-epochs ME The maximum epochs to train for. If patience value is not
provided it will always train for ME epochs. [default: 1000]
--model-name MN Name of the model when saving. Defaults to current time
and date if not provided.
--processors PS Which processors to use. String of comma separated values.
[default: MPNN]
--RPHWM Whether to Reduce Processor set Hiddens With MLP?
--gradient-clip-val G Constant for gradient clipping. 0 means no clipping.
[default: 1]
--xavier-on-scalars Use Xavier initialisation for linears that encode scalars.
--biased-gate Bias the gating mechanism towards less updating
--update-edges-hidden Whether to also keep a track of hidden edge state.
--use-LSTM Add an LSTMCell just after the processor step
(in case of several processors, each has its own LSTM)
--use-ln Use Layer Norm in the processor.
--algorithms ALGOS List of algorithms to train on. Repeatable. [default: mst_prim]
--sampler-type (default|geometric) What sampler was used for graph generation. [default: default]
--seed S Random seed to set. [default: 47]
"""
if __name__ == '__main__':
hidden_dim = get_hyperparameters()['dim_latent']
serialised_models_dir = os.path.abspath('./serialised_models/')
schema = schema.Schema({
'--help': bool,
'--xavier-on-scalars': bool,
'--biased-gate': bool,
'--update-edges-hidden': bool,
'--use-LSTM': bool,
'--use-ln': bool,
'--patience': schema.Or(None, schema.Use(int)),
'--max-epochs': schema.Or(None, schema.Use(int)),
'--model-name': schema.Or(None, schema.Use(str)),
'--processors': schema.And(schema.Use(lambda x: x.split(',')), lambda lst: all(x in _PROCESSSOR_DICT for x in lst)),
'--RPHWM': bool,
'--gradient-clip-val': schema.Use(int),
'--algorithms': schema.Use(lambda x: x.split(',')),
'--sampler-type': str,
'--seed': schema.Use(int),
})
args = docopt(__doc__)
args = schema.validate(args)
name = args['--model-name'] if args['--model-name'] is not None else datetime.now().strftime('%b-%d-%Y-%H-%M')
pl.utilities.seed.seed_everything(args['--seed'])
lit_processor = LitAlgorithmProcessor(
hidden_dim,
args['--algorithms'],
dict((algo, {'sampler_type': args['--sampler-type']}) for algo in args['--algorithms']),
dict((algo, LitAlgorithmReasoner) for algo in args['--algorithms']),
False, #args['--ensure-permutation'] is False for non-TSP
reduce_proc_hid_w_MLP=args['--RPHWM'],
update_edges_hidden=args['--update-edges-hidden'],
use_TF=False,
use_gate=True,
use_LSTM=args['--use-LSTM'],
use_ln=args['--use-ln'],
freeze_proc=False, # We don't have a transfer task
processors=args['--processors'],
xavier_on_scalars=args['--xavier-on-scalars'],
biased_gate=args['--biased-gate'],
)
| all_cbs = get_callbacks(name, serialised_models_dir, args['--patience']) | 5 | 2023-11-20 15:32:43+00:00 | 12k |
oniyevski/oniRedemption | ui/main.py | [
{
"identifier": "Header",
"path": "ui/header.py",
"snippet": "class Header:\n def __init__(self, config):\n self.config = config\n self.functions = Functions()\n\n self.logoText = Row(\n controls=[\n Text(\n \"oniRedemption\",\n ... | import os, threading
from time import sleep
from flet import *
from ui.colors import *
from ui.header import Header
from ui.footer import Footer
from ui.content import Content
from modules.functions import Functions
from modules.config import Config
from modules.process import Process
from modules.modal import Modal
from modules.discord import Discord | 8,041 |
config = Config()
headerClass = Header(config)
footerClass = Footer(config)
contentClass = Content(config)
func = Functions()
|
config = Config()
headerClass = Header(config)
footerClass = Footer(config)
contentClass = Content(config)
func = Functions()
| discord = Discord(content=contentClass, config=config) | 7 | 2023-11-24 21:15:24+00:00 | 12k |
davecasp/add-thin | add_thin/config.py | [
{
"identifier": "DataModule",
"path": "add_thin/data.py",
"snippet": "class DataModule(pl.LightningDataModule):\n \"\"\"\n Datamodule for variable length event sequences for temporal point processes.\n\n Parameters:\n ----------\n root : str\n Path to data.\n name : str\n ... | from pathlib import Path
from omegaconf import DictConfig
from add_thin.data import DataModule
from add_thin.diffusion.model import AddThin
from add_thin.backbones.classifier import PointClassifier
from add_thin.distributions.intensities import MixtureIntensity
from add_thin.tasks import DensityEstimation, Forecasting | 8,121 |
def instantiate_datamodule(config: DictConfig, task_name):
return DataModule(
Path(config.root),
config.name,
batch_size=config.batch_size,
forecast=task_name == "forecast",
)
def instantiate_model(config: DictConfig, datamodule) -> AddThin:
classifier = PointClassifier(
hidden_dims=config.hidden_dims,
layer=config.classifier_layer,
)
|
def instantiate_datamodule(config: DictConfig, task_name):
return DataModule(
Path(config.root),
config.name,
batch_size=config.batch_size,
forecast=task_name == "forecast",
)
def instantiate_model(config: DictConfig, datamodule) -> AddThin:
classifier = PointClassifier(
hidden_dims=config.hidden_dims,
layer=config.classifier_layer,
) | intensity = MixtureIntensity( | 3 | 2023-11-24 13:18:19+00:00 | 12k |
harisankar95/pathfinding3D | pathfinding3d/finder/msp.py | [
{
"identifier": "heuristic",
"path": "pathfinding3d/core/heuristic.py",
"snippet": "def null(dx: Union[int, float], dy: Union[int, float], dz: Union[int, float]) -> float:\ndef manhattan(dx: Union[int, float], dy: Union[int, float], dz: Union[int, float]) -> float:\ndef euclidean(dx: Union[int, float], ... | import time
from collections import deque, namedtuple
from typing import List, Tuple
from ..core import heuristic
from ..core.grid import Grid
from ..core.heap import SimpleHeap
from ..core.node import GridNode
from ..finder.finder import Finder | 7,642 |
class MinimumSpanningTree(Finder):
"""
Minimum Spanning Tree implementation by Brad Beattie
(see https://github.com/brean/python-pathfinding/issues/18)
The wikipedia page has a nice description about MSP:
https://en.wikipedia.org/wiki/Minimum_spanning_tree
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.heuristic = heuristic.null
def tree(self, grid: Grid, start: GridNode) -> List:
"""
Returns a list of nodes that are part of the minimum spanning tree
of the grid.
Parameters
----------
grid : Grid
grid that stores all possible steps/tiles as 3D-list
start : GridNode
start node
Returns
-------
List
"""
return list(self.itertree(grid, start))
def itertree(self, grid: Grid, start: GridNode):
"""
Returns a generator that yields nodes that are part of the minimum
spanning tree of the grid.
Parameters
----------
grid : Grid
grid that stores all possible steps/tiles as 3D-list
start : GridNode
start node
"""
# Finder.process_node requires an end node, which we don't have.
# The following value tricks the call to Finder.apply_heuristic.
# Though maybe we want to generate a limited spanning tree that
# trends in a certain direction? In which case we'd want a more
# nuanced solution.
end = namedtuple("FakeNode", ["x", "y", "z"])(-1, -1, -1)
start.opened = True
|
class MinimumSpanningTree(Finder):
"""
Minimum Spanning Tree implementation by Brad Beattie
(see https://github.com/brean/python-pathfinding/issues/18)
The wikipedia page has a nice description about MSP:
https://en.wikipedia.org/wiki/Minimum_spanning_tree
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.heuristic = heuristic.null
def tree(self, grid: Grid, start: GridNode) -> List:
"""
Returns a list of nodes that are part of the minimum spanning tree
of the grid.
Parameters
----------
grid : Grid
grid that stores all possible steps/tiles as 3D-list
start : GridNode
start node
Returns
-------
List
"""
return list(self.itertree(grid, start))
def itertree(self, grid: Grid, start: GridNode):
"""
Returns a generator that yields nodes that are part of the minimum
spanning tree of the grid.
Parameters
----------
grid : Grid
grid that stores all possible steps/tiles as 3D-list
start : GridNode
start node
"""
# Finder.process_node requires an end node, which we don't have.
# The following value tricks the call to Finder.apply_heuristic.
# Though maybe we want to generate a limited spanning tree that
# trends in a certain direction? In which case we'd want a more
# nuanced solution.
end = namedtuple("FakeNode", ["x", "y", "z"])(-1, -1, -1)
start.opened = True
| open_list = SimpleHeap(start, grid) | 2 | 2023-11-21 10:14:12+00:00 | 12k |
vtarasv/pocket-cfdm | predict.py | [
{
"identifier": "DEVICE",
"path": "params.py",
"snippet": "DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')"
},
{
"identifier": "logger",
"path": "utils/general.py",
"snippet": "def load_pkl(path):\ndef save_pkl(obj, path):\ndef read_strings_from_txt(path):\ndef para... | import copy
import time
import os
import yaml
import numpy as np
import pandas as pd
import torch
from argparse import ArgumentParser
from pathlib import Path
from tqdm import tqdm
from scipy.spatial.distance import pdist, cdist
from torch_geometric.loader import DataLoader
from rdkit import Chem
from params import DEVICE
from utils import logger, ExponentialMovingAverage, TtoSigma, get_t_schedule, set_mol_pose
from features import lig_cat_dims, lig_cont_feats, prot_cat_dims, prot_cont_feats, PocketFeaturizer
from dataset import set_time, modify_conformer, randomize_position, PredSDFDataLoader
from model import FitModel
from rai_chem.protein import PDBParser, Protein
from rai_chem.score import get_fit_score | 7,253 |
def main(args):
with open("data/config/train.yml", "r") as f:
config = yaml.load(f, Loader=yaml.FullLoader)
with open("data/config/pred.yml", "r") as f:
pconfig = yaml.load(f, Loader=yaml.FullLoader)
if args.samples is not None:
pconfig["samples"] = args.samples
if args.batch_size is not None:
pconfig["batch_size"] = args.batch_size
tmp_pdb_path = ".tmp_hs.pdb"
os.system(f"reduce -Quiet -Trim {args.pdb} > .tmp.pdb")
os.system(f"reduce -Quiet -NOFLIP .tmp.pdb > {tmp_pdb_path}")
with open(tmp_pdb_path, "r") as f:
pdb_lines = f.readlines()
pocket = PDBParser(args.pdb, pdb_lines, remove_hs=False)
pocket = Protein(args.pdb, pocket.atoms)
pocket_cent = pocket.atoms["Coord"].mean(axis=0)
pf = PocketFeaturizer(pocket, radius=config["prot_radius"], max_neighbors=config["prot_max_neighbors"]).graph_feat
pf["coords"] -= pocket_cent
loader = PredSDFDataLoader(args.sdf, pf, pocket_cent, device=DEVICE)
logger.debug(f"using parameters: {config}")
t_to_sigma = TtoSigma(tr_sigma_min=config["tr_sigma_min"], tr_sigma_max=config["tr_sigma_max"],
rot_sigma_min=config["rot_sigma_min"], rot_sigma_max=config["rot_sigma_max"],
tor_sigma_min=config["tor_sigma_min"], tor_sigma_max=config["tor_sigma_max"])
t_schedule = get_t_schedule(inference_steps=pconfig["inference_steps"])
|
def main(args):
with open("data/config/train.yml", "r") as f:
config = yaml.load(f, Loader=yaml.FullLoader)
with open("data/config/pred.yml", "r") as f:
pconfig = yaml.load(f, Loader=yaml.FullLoader)
if args.samples is not None:
pconfig["samples"] = args.samples
if args.batch_size is not None:
pconfig["batch_size"] = args.batch_size
tmp_pdb_path = ".tmp_hs.pdb"
os.system(f"reduce -Quiet -Trim {args.pdb} > .tmp.pdb")
os.system(f"reduce -Quiet -NOFLIP .tmp.pdb > {tmp_pdb_path}")
with open(tmp_pdb_path, "r") as f:
pdb_lines = f.readlines()
pocket = PDBParser(args.pdb, pdb_lines, remove_hs=False)
pocket = Protein(args.pdb, pocket.atoms)
pocket_cent = pocket.atoms["Coord"].mean(axis=0)
pf = PocketFeaturizer(pocket, radius=config["prot_radius"], max_neighbors=config["prot_max_neighbors"]).graph_feat
pf["coords"] -= pocket_cent
loader = PredSDFDataLoader(args.sdf, pf, pocket_cent, device=DEVICE)
logger.debug(f"using parameters: {config}")
t_to_sigma = TtoSigma(tr_sigma_min=config["tr_sigma_min"], tr_sigma_max=config["tr_sigma_max"],
rot_sigma_min=config["rot_sigma_min"], rot_sigma_max=config["rot_sigma_max"],
tor_sigma_min=config["tor_sigma_min"], tor_sigma_max=config["tor_sigma_max"])
t_schedule = get_t_schedule(inference_steps=pconfig["inference_steps"])
| model = FitModel( | 10 | 2023-11-23 16:09:18+00:00 | 12k |
yuukawahiroshi/ddb-tools | mixins_ddb.py | [
{
"identifier": "DDIModel",
"path": "utils/ddi_utils.py",
"snippet": "class DDIModel:\n def __init__(self, ddi_bytes: bytes) -> None:\n self.ddi_bytes = ddi_bytes\n self.ddi_data = None\n self.phdc_data = {}\n self.tdb_data = {}\n self.sta_data = {}\n self.ar... | from typing import TypedDict
from utils.ddi_utils import DDIModel, str_to_bytes, str_to_data, stream_reverse_search
import argparse
import io
import re
import os
import os.path
import struct | 10,377 | epr: list[int]
snd_id: int
snd: int
fs: int
unknown1: str
pitch1: float
pitch2: float
unknown2: float
unknown3: float
dynamics: float
def byte_replace(src_bytes: bytes, offset: int, override_len: int, replace_bytes: bytes):
    """Return a copy of *src_bytes* with ``override_len`` bytes starting at
    *offset* replaced by *replace_bytes* (which may have a different length)."""
    prefix = src_bytes[:offset]
    suffix = src_bytes[offset + override_len:]
    return prefix + replace_bytes + suffix
def parse_args(args=None): # : list[str]
    """Parse CLI options and derive all working paths for the mixins tool.

    Validates that the source DDI exists, resolves the mixins DDI (defaults
    to the source DDI itself), chooses the output folder (default
    ``<src folder>/mixins``) and creates it on disk as a side effect.

    Returns:
        (src_path, src_singer_name, mixins_path, mixins_singer_name,
         dst_path, mixins_item, args) — directory/singer-name pairs are
        derived from the DDI file locations.
    """
    # initialize parser
    parser = argparse.ArgumentParser(formatter_class=SmartFormatter)
    parser.add_argument('--src_path', required=True,
                        help='source ddi file path')
    parser.add_argument('--mixins_path',
                        help='the mixins ddi file path. default to be same as src_path')
    parser.add_argument('--dst_path',
                        help='output folder, '
                        'default to be "./[singer name]/mixins"')
    # "R|" prefix keeps the embedded newlines (see SmartFormatter).
    parser.add_argument('--mixins_item',
                        choices=['vqm', 'sta2vqm'],
                        default='vqm',
                        help='R|mixins item, '
                        'default to be "vqm"\n'
                        'select from: \n'
                        '  vqm: growl\n'
                        '  sta2vqm: convert stationary entry to growl\n')
    parser.add_argument('--sta2vqm_phoneme',
                        default="Grw",
                        help='phoneme for sta2vqm, will use this phoneme to generate growl, default to be "Grw"')
    # parse args
    args = parser.parse_args(args)
    # Source DDI must exist; its folder and basename give singer path/name.
    src_ddi_path: str = os.path.normpath(args.src_path)
    if not os.path.exists(src_ddi_path):
        raise Exception("ddi file not exists")
    src_path = os.path.dirname(src_ddi_path)
    src_singer_name = os.path.splitext(os.path.basename(src_ddi_path))[0]
    # Mixins DDI defaults to the source DDI itself when not given.
    mixins_ddi_path = args.mixins_path or src_ddi_path
    mixins_ddi_path: str = os.path.normpath(mixins_ddi_path)
    mixins_path = os.path.dirname(mixins_ddi_path)
    mixins_singer_name = os.path.splitext(os.path.basename(mixins_ddi_path))[0]
    # Output folder defaults to "<src folder>/mixins".
    dst_path: str = args.dst_path
    if dst_path is None:
        dst_path = os.path.join(src_path, "mixins")
    dst_path: str = os.path.normpath(dst_path)
    # make dirs
    if not os.path.exists(dst_path):
        os.makedirs(dst_path)
    mixins_item = args.mixins_item
    return src_path, src_singer_name, mixins_path, mixins_singer_name, dst_path, mixins_item, args
def _create_vqm_stream(vqm_meta_list: list[VQMMeta]):
    """Serialize VQM (growl) entries into a DDI "VQM" section stream.

    Writes a VQM/VQMu header carrying the entry count, then one VQMp record
    per entry: float parameters, the EpR offset table and the SND reference,
    followed by the entry's name strings. All integers are little-endian;
    the 0xFF runs appear to be padding/markers — TODO confirm against the
    DDI format notes.

    Args:
        vqm_meta_list: parsed VQM entries to write back out.

    Returns:
        io.BytesIO containing the serialized section (position at its end).
    """
    # Create VQM struct
    vqm_stream = io.BytesIO()
    vqm_stream.write(b'\xFF'*8)
    vqm_stream.write(b'VQM ')
    vqm_stream.write((0).to_bytes(4, byteorder='little'))
    vqm_stream.write((1).to_bytes(4, byteorder='little'))
    vqm_stream.write((0).to_bytes(4, byteorder='little'))
    vqm_stream.write((1).to_bytes(4, byteorder='little'))
    vqm_stream.write(b'\xFF'*8)
    vqm_stream.write(b'VQMu')
    vqm_stream.write((0).to_bytes(4, byteorder='little'))
    vqm_stream.write((1).to_bytes(4, byteorder='little'))
    vqm_stream.write((0).to_bytes(4, byteorder='little'))
    # Entry count is written twice (header count + array length).
    vqm_stream.write(len(vqm_meta_list).to_bytes(4, byteorder='little'))
    vqm_stream.write(len(vqm_meta_list).to_bytes(4, byteorder='little'))
    for vqm_meta in vqm_meta_list:
        vqm_stream.write(b'\xFF'*8)
        vqm_stream.write(b"VQMp")
        vqm_stream.write((0).to_bytes(4, byteorder='little'))
        vqm_stream.write((0).to_bytes(4, byteorder='little'))
        vqm_stream.write((1).to_bytes(4, byteorder='little'))
        # Opaque field copied verbatim from the parsed entry.
        vqm_stream.write(str_to_bytes(vqm_meta["unknown1"]))
        # NOTE(review): a constant 224.0 is written where vqm_meta["pitch1"]
        # would be expected — confirm whether this is intentional.
        vqm_stream.write(struct.pack("<f", 224.0)) # Unknown
        vqm_stream.write(struct.pack("<f", vqm_meta["pitch2"]))
        vqm_stream.write(struct.pack("<f", vqm_meta["unknown2"]))
        vqm_stream.write(struct.pack("<f", vqm_meta["dynamics"]))
        vqm_stream.write(struct.pack("<f", vqm_meta["unknown3"]))
        vqm_stream.write((0).to_bytes(4, byteorder='little'))
        # EpR: offset table, one 8-byte little-endian offset per frame.
        vqm_stream.write(b'\xFF'*4)
        vqm_stream.write(len(vqm_meta["epr"]).to_bytes(4, byteorder='little'))
        for epr_offset in vqm_meta["epr"]:
            vqm_stream.write(epr_offset.to_bytes(8, byteorder='little'))
        # SND: sample rate, id and payload offset.
        vqm_stream.write(vqm_meta["fs"].to_bytes(4, byteorder='little'))
        vqm_stream.write(b'\x01\x00')
        vqm_stream.write(vqm_meta["snd_id"].to_bytes(4, byteorder='little'))
        vqm_stream.write(vqm_meta["snd"].to_bytes(8, byteorder='little'))
        vqm_stream.write(b'\xFF'*0x10)
        # Trailing name strings: entry index, category, extension.
        vqm_stream.write(str_to_data(vqm_meta["idx"]))
        vqm_stream.write(str_to_data("GROWL"))
        vqm_stream.write(str_to_data("vqm"))
    return vqm_stream
| #!/bin/env python3
# I thought what I'd do was, I'd pretend I was one of those deaf-mutes.
from __future__ import annotations
# Trailing marker of a DDI voice bank: a 4-byte little-endian length prefix (5)
# followed by the ASCII payload "voice" — presumably matching the
# length-prefixed string encoding used by utils.ddi_utils; TODO confirm.
ddi_footer = b'\x05\x00\x00\x00' + "voice".encode()
class SmartFormatter(argparse.HelpFormatter):
    """Help formatter that preserves explicit line breaks.

    Help strings prefixed with ``R|`` are emitted with their embedded
    newlines intact; any other string falls back to the default wrapping
    behaviour of :class:`argparse.HelpFormatter`.
    """

    def _split_lines(self, text, width):
        # An "R|" prefix marks raw text: keep the author's own line breaks.
        if text.startswith('R|'):
            return text[2:].splitlines()
        # Otherwise wrap normally, exactly like the stock formatter.
        return super()._split_lines(text, width)
class VQMMeta(TypedDict):
    """Metadata for one VQM (growl) entry of a DDI voice bank.

    See ``_create_vqm_stream`` for how each field is serialized back out;
    offsets refer to byte positions in the companion DDB stream —
    TODO confirm against the DDI/DDB format notes.
    """
    # Entry index, kept as a string (written as a length-prefixed name).
    idx: str
    # EpR frame offsets, each serialized as an 8-byte little-endian value.
    epr: list[int]
    # Identifier of the referenced SND block (4-byte value).
    snd_id: int
    # Offset of the SND payload (8-byte value).
    snd: int
    # Sample rate of the SND data — presumably; verify against the DDB spec.
    fs: int
    # Opaque field copied verbatim into the VQMp record via str_to_bytes.
    unknown1: str
    # NOTE(review): pitch1 is parsed but never written back —
    # _create_vqm_stream emits a constant 224.0 in its place; confirm intent.
    pitch1: float
    pitch2: float
    unknown2: float
    unknown3: float
    # Serialized as a little-endian float alongside the pitch values.
    dynamics: float
def byte_replace(src_bytes: bytes, offset: int, override_len: int, replace_bytes: bytes):
return src_bytes[:offset] + replace_bytes + src_bytes[offset + override_len:]
def parse_args(args=None): # : list[str]
# initialize parser
parser = argparse.ArgumentParser(formatter_class=SmartFormatter)
parser.add_argument('--src_path', required=True,
help='source ddi file path')
parser.add_argument('--mixins_path',
help='the mixins ddi file path. default to be same as src_path')
parser.add_argument('--dst_path',
help='output folder, '
'default to be "./[singer name]/mixins"')
parser.add_argument('--mixins_item',
choices=['vqm', 'sta2vqm'],
default='vqm',
help='R|mixins item, '
'default to be "vqm"\n'
'select from: \n'
' vqm: growl\n'
' sta2vqm: convert stationary entry to growl\n')
parser.add_argument('--sta2vqm_phoneme',
default="Grw",
help='phoneme for sta2vqm, will use this phoneme to generate growl, default to be "Grw"')
# parse args
args = parser.parse_args(args)
src_ddi_path: str = os.path.normpath(args.src_path)
if not os.path.exists(src_ddi_path):
raise Exception("ddi file not exists")
src_path = os.path.dirname(src_ddi_path)
src_singer_name = os.path.splitext(os.path.basename(src_ddi_path))[0]
mixins_ddi_path = args.mixins_path or src_ddi_path
mixins_ddi_path: str = os.path.normpath(mixins_ddi_path)
mixins_path = os.path.dirname(mixins_ddi_path)
mixins_singer_name = os.path.splitext(os.path.basename(mixins_ddi_path))[0]
dst_path: str = args.dst_path
if dst_path is None:
dst_path = os.path.join(src_path, "mixins")
dst_path: str = os.path.normpath(dst_path)
# make dirs
if not os.path.exists(dst_path):
os.makedirs(dst_path)
mixins_item = args.mixins_item
return src_path, src_singer_name, mixins_path, mixins_singer_name, dst_path, mixins_item, args
def _create_vqm_stream(vqm_meta_list: list[VQMMeta]):
# Create VQM struct
vqm_stream = io.BytesIO()
vqm_stream.write(b'\xFF'*8)
vqm_stream.write(b'VQM ')
vqm_stream.write((0).to_bytes(4, byteorder='little'))
vqm_stream.write((1).to_bytes(4, byteorder='little'))
vqm_stream.write((0).to_bytes(4, byteorder='little'))
vqm_stream.write((1).to_bytes(4, byteorder='little'))
vqm_stream.write(b'\xFF'*8)
vqm_stream.write(b'VQMu')
vqm_stream.write((0).to_bytes(4, byteorder='little'))
vqm_stream.write((1).to_bytes(4, byteorder='little'))
vqm_stream.write((0).to_bytes(4, byteorder='little'))
vqm_stream.write(len(vqm_meta_list).to_bytes(4, byteorder='little'))
vqm_stream.write(len(vqm_meta_list).to_bytes(4, byteorder='little'))
for vqm_meta in vqm_meta_list:
vqm_stream.write(b'\xFF'*8)
vqm_stream.write(b"VQMp")
vqm_stream.write((0).to_bytes(4, byteorder='little'))
vqm_stream.write((0).to_bytes(4, byteorder='little'))
vqm_stream.write((1).to_bytes(4, byteorder='little'))
vqm_stream.write(str_to_bytes(vqm_meta["unknown1"]))
vqm_stream.write(struct.pack("<f", 224.0)) # Unknown
vqm_stream.write(struct.pack("<f", vqm_meta["pitch2"]))
vqm_stream.write(struct.pack("<f", vqm_meta["unknown2"]))
vqm_stream.write(struct.pack("<f", vqm_meta["dynamics"]))
vqm_stream.write(struct.pack("<f", vqm_meta["unknown3"]))
vqm_stream.write((0).to_bytes(4, byteorder='little'))
# EpR
vqm_stream.write(b'\xFF'*4)
vqm_stream.write(len(vqm_meta["epr"]).to_bytes(4, byteorder='little'))
for epr_offset in vqm_meta["epr"]:
vqm_stream.write(epr_offset.to_bytes(8, byteorder='little'))
# SND
vqm_stream.write(vqm_meta["fs"].to_bytes(4, byteorder='little'))
vqm_stream.write(b'\x01\x00')
vqm_stream.write(vqm_meta["snd_id"].to_bytes(4, byteorder='little'))
vqm_stream.write(vqm_meta["snd"].to_bytes(8, byteorder='little'))
vqm_stream.write(b'\xFF'*0x10)
vqm_stream.write(str_to_data(vqm_meta["idx"]))
vqm_stream.write(str_to_data("GROWL"))
vqm_stream.write(str_to_data("vqm"))
return vqm_stream
| def mixins_vqm(src_ddi_bytes: bytes, output_stream: io.BufferedWriter, mixins_ddi_model: DDIModel, mixins_ddb_stream: io.BufferedReader): | 0 | 2023-11-20 11:37:46+00:00 | 12k |
shercoo/RGDiffSR | text_super_resolution/interfaces/super_resolution.py | [
{
"identifier": "util",
"path": "utils/util.py",
"snippet": "def str_filt(str_, voc_type):\n def __init__(self, alphabet):\n def encode(self, text):\n def decode(self, t, length, raw=False):\n def __init__(self):\n def add(self, v):\n def reset(self):\n def val(self):\ndef oneHot(v,... | import torch
import sys
import time
import os
import math
import pickle
import copy
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import cv2
import random
import math
import numpy as np
import editdistance
import time
import lpips
from time import gmtime, strftime
from datetime import datetime
from tqdm import tqdm
from utils import util, ssim_psnr
from IPython import embed
from torchvision import transforms
from torch.autograd import Variable
from thop import profile
from PIL import Image
from text_super_resolution.interfaces import base
from utils.meters import AverageMeter
from utils.metrics import get_string_aster, get_string_crnn, Accuracy
from utils.util import str_filt
from utils import utils_moran
from text_super_resolution.model import gumbel_softmax
from text_super_resolution.loss.semantic_loss import SemanticLoss
from copy import deepcopy
from tensorboardX import SummaryWriter
from text_super_resolution.loss.stroke_focus_loss import StrokeFocusLoss
from text_super_resolution.loss.transformer_english_decomposition import Transformer
from text_super_resolution.model.tps_spatial_transformer import TPSSpatialTransformer
from text_super_resolution.model.stn_head import STNHead
from ptflops import get_model_complexity_info | 9,225 |
Args:
image (torch.Tensor): RGB Image to be converted to YUV with shape :math:`(*, 3, H, W)`.
Returns:
torch.Tensor: YUV version of the image with shape :math:`(*, 3, H, W)`.
Example:
>>> input = torch.rand(2, 3, 4, 5)
>>> output = rgb_to_yuv(input) # 2x3x4x5
"""
if not isinstance(image, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(image)))
if len(image.shape) < 3 or image.shape[-3] != 3:
raise ValueError("Input size must have a shape of (*, 3, H, W). Got {}"
.format(image.shape))
r: torch.Tensor = image[..., 0, :, :]
g: torch.Tensor = image[..., 1, :, :]
b: torch.Tensor = image[..., 2, :, :]
y: torch.Tensor = 0.299 * r + 0.587 * g + 0.114 * b
u: torch.Tensor = -0.147 * r - 0.289 * g + 0.436 * b
v: torch.Tensor = 0.615 * r - 0.515 * g - 0.100 * b
out: torch.Tensor = torch.stack([y, u, v], -3)
return out
def model_inference(self, images_lr, images_hr, model_list, aster, i, stroke_rec, label_strs, data_name):
ret_dict = {}
ret_dict["label_vecs"] = None
ret_dict["duration"] = 0
if self.args.arch == "tsrn":
before = time.time()
images_sr = model_list[0](images_lr)
after = time.time()
ret_dict["duration"] += (after - before)
if vis_feature:
# [N, C, H, W] -> [N, C, W]
block_feature = model_list[-1].block["7"].mean(2)
elif self.args.arch in ["tsrn_tl", "tsrn_tl_wmask"]:
###############################################
aster_dict_hr = self.parse_crnn_data(images_lr[:, :3, :, :])
label_vecs = aster[1](aster_dict_hr)
label_vecs = torch.nn.functional.softmax(label_vecs, -1)
ret_dict["label_vecs"] = label_vecs
'''
##############
# val: [T, B] <- [T, B, C]
label_val, label_indices = torch.max(label_vecs, -1)
label_indices = label_indices.view(label_indices.shape[0], label_indices.shape[1], 1)
new_label_vecs = torch.zeros(label_vecs.shape).float().to(label_vecs.device)
new_label_vecs.scatter_(2, label_indices, 1)
# label_vecs[label_vecs > 0.5] = 1.
noise = (torch.rand(label_vecs.shape) - 0.5) * 0.2
label_vecs = new_label_vecs.to(label_vecs.device) + noise.to(label_vecs.device)
##############
'''
# [T, B, C] -> [B, T, C] -> [B, 1, T, C]
label_vecs = label_vecs.permute(1, 0, 2).unsqueeze(1).permute(0, 3, 1, 2)
###############################################
images_sr = model_list[0](images_lr, label_vecs)
elif self.args.arch in ABLATION_SET:
cascade_images = images_lr
images_sr = []
if vis:
aster_dict_hr = self.parse_crnn_data(
images_lr[:, :3, :, :] if not self.args.y_domain else images_lrraw[:, :3, :, :])
# print("aster_dict_hr:", aster_dict_hr.shape)
label_vecs_lr = aster[0]['model'](aster_dict_hr)
label_vecs_lr = torch.nn.functional.softmax(label_vecs_lr, -1)
aster_dict_hr = self.parse_crnn_data(
images_hr[:, :3, :, :] if not self.args.y_domain else images_hrraw[:, :3, :, :])
label_vecs_hr = aster[0]['model'](aster_dict_hr)
label_vecs_hr = torch.nn.functional.softmax(label_vecs_hr, -1)
label_vecs_final_hr = label_vecs_hr.permute(1, 0, 2).unsqueeze(1).permute(0, 3, 1, 2)
ret_dict["label_vecs_hr"] = label_vecs_hr
for m_iter in range(self.args.stu_iter):
if self.args.tpg_share:
tpg_pick = 0
else:
tpg_pick = m_iter
stu_model = aster[1][tpg_pick]
aster_dict_lr = self.parse_crnn_data(
cascade_images[:, :3, :, :] if not self.args.y_domain else images_lrraw[:, :3, :,
:]) # cascade_images
before = time.time()
label_vecs_logits = stu_model(aster_dict_lr)
label_vecs = torch.nn.functional.softmax(label_vecs_logits, -1)
label_vecs_final = label_vecs.permute(1, 0, 2).unsqueeze(1).permute(0, 3, 1, 2)
ret_dict["label_vecs"] = label_vecs
# if data_name=='medium':
# print('images_lr.shape: {}\nimages_hr.shape: {}\nlabel_strs.shape: {}\n'.format(images_lr,images_hr,label_strs))
sr = nn.functional.interpolate(images_lr,(self.config.TRAIN.height,self.config.TRAIN.width),mode='bicubic')
|
# Perceptual LPIPS metric (VGG backbone); constructed at import time, so the
# backbone weights are loaded once when this module is first imported.
lpips_vgg = lpips.LPIPS(net="vgg")
vis = False  # enable extra label-vector computation for inspection (see model_inference)
vis_feature = False  # additionally capture intermediate feature maps (see model_inference)
# torch.backends.cudnn.enabled = False
TEST_MODEL = "MORAN"  # recognizer used during evaluation
sem_loss = SemanticLoss()
ctc_loss = torch.nn.CTCLoss(blank=0, reduction='none')  # per-sample CTC loss (no reduction)
# Structural-similarity metric variants.
ssim = ssim_psnr.SSIM()
distorted_ssim = ssim_psnr.Distorted_SSIM()
tri_ssim = ssim_psnr.TRI_SSIM()
# Architecture names routed through the text-prior branch of model_inference.
ABLATION_SET = ["tsrn_tl_cascade_sft", "tsrn_tl_cascade", "srcnn_tl",
                "srresnet_tl", "rdn_tl", "vdsr_tl", "tranSR_v4",
                "esrgan_tl", "scgan_tl", "tbsrn_tl", "tatt", "pcan_tl"]
_DEBUG = False
class TextSR(base.TextBase):
    def SR_confence(self, image, angle):
        # Stub: declared but never implemented (name looks like a typo for
        # "SR_confidence") — NOTE(review): confirm it is unused before removal.
        pass
def rotate_img(self, image, angle):
# convert to cv2 image
if not angle == 0.0:
(h, w) = image.shape[:2]
scale = 1.0
# set the rotation center
center = (w / 2, h / 2)
# anti-clockwise angle in the function
M = cv2.getRotationMatrix2D(center, angle, scale)
image = cv2.warpAffine(image, M, (w, h))
# back to PIL image
return image
def loss_stablizing(self, loss_set, keep_proportion=0.7):
# acsending
sorted_val, sorted_ind = torch.sort(loss_set)
batch_size = loss_set.shape[0]
# print("batch_size:", loss_set, batch_size)
loss_set[sorted_ind[int(keep_proportion * batch_size)]:] = 0.0
return loss_set
    def cal_all_models(self, model_list, recognizer_list):
        """Print FLOPs/parameter counts (via ptflops) for every SR model and
        recognizer.

        NOTE(review): the running totals are never accumulated — the
        `macs += mac` / `params += param` lines are commented out (ptflops
        returns strings when as_strings=True), so the final "Total" lines
        always print 0.
        """
        macs = 0.
        params = 0.
        # Per-model complexity at the SR input resolution (4, 16, 64).
        for model in model_list:
            mac, param = get_model_complexity_info(model, (4, 16, 64), as_strings=True,
                                                   print_per_layer_stat=False, verbose=True)
            print('model {:<30}  {:<8}'.format('Computational complexity: ', mac))
            print('model {:<30}  {:<8}'.format('Number of parameters: ', param))
            # macs += mac
            # params += param
        # Per-recognizer complexity at the recognizer input resolution (1, 32, 100).
        for recognizer in recognizer_list:
            mac, param = get_model_complexity_info(recognizer, (1, 32, 100), as_strings=True,
                                                   print_per_layer_stat=False, verbose=True)
            print('recognizer {:<30}  {:<8}'.format('Computational complexity: ', mac))
            print('recognizer {:<30}  {:<8}'.format('Number of parameters: ', param))
            # macs += mac
            # params += param
        print('{:<30}  {:<8}'.format('Total computational complexity: ', macs))
        print('{:<30}  {:<8}'.format('Total number of parameters: ', params))
def torch_rotate_img(self, torch_image_batches, arc_batches, rand_offs, off_range=0.2):
# ratios: H / W
device = torch_image_batches.device
N, C, H, W = torch_image_batches.shape
ratios = H / float(W)
# rand_offs = random.random() * (1 - ratios)
ratios_mul = ratios + (rand_offs.unsqueeze(1) * off_range * 2) - off_range
a11, a12, a21, a22 = torch.cos(arc_batches), \
torch.sin(arc_batches), \
-torch.sin(arc_batches), \
torch.cos(arc_batches)
# print("rand_offs:", rand_offs.shape, a12.shape)
x_shift = torch.zeros_like(arc_batches)
y_shift = torch.zeros_like(arc_batches)
# print("device:", device)
affine_matrix = torch.cat([a11.unsqueeze(1), a12.unsqueeze(1) * ratios_mul, x_shift.unsqueeze(1),
a21.unsqueeze(1) / ratios_mul, a22.unsqueeze(1), y_shift.unsqueeze(1)], dim=1)
affine_matrix = affine_matrix.reshape(N, 2, 3).to(device)
affine_grid = F.affine_grid(affine_matrix, torch_image_batches.shape)
distorted_batches = F.grid_sample(torch_image_batches, affine_grid)
return distorted_batches
def yuv_to_rgb(self, image: torch.Tensor) -> torch.Tensor:
r"""Convert an YUV image to RGB.
The image data is assumed to be in the range of (0, 1).
Args:
image (torch.Tensor): YUV Image to be converted to RGB with shape :math:`(*, 3, H, W)`.
Returns:
torch.Tensor: RGB version of the image with shape :math:`(*, 3, H, W)`.
Example:
>>> input = torch.rand(2, 3, 4, 5)
>>> output = yuv_to_rgb(input) # 2x3x4x5
"""
if not isinstance(image, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(image)))
if len(image.shape) < 3 or image.shape[-3] != 3:
raise ValueError("Input size must have a shape of (*, 3, H, W). Got {}"
.format(image.shape))
y: torch.Tensor = image[..., 0, :, :]
u: torch.Tensor = image[..., 1, :, :]
v: torch.Tensor = image[..., 2, :, :]
r: torch.Tensor = y + 1.14 * v # coefficient for g is 0
g: torch.Tensor = y + -0.396 * u - 0.581 * v
b: torch.Tensor = y + 2.029 * u # coefficient for b is 0
out: torch.Tensor = torch.stack([r, g, b], -3)
return out
    def yuv_to_rgb_cv(self, image: torch.Tensor) -> torch.Tensor:
        """YUV→RGB conversion via OpenCV, round-tripping through numpy on CPU.

        NOTE(review): ``cv2.cvtColor`` expects a channel-last H×W×3 array;
        if callers pass the ``(3, H, W)`` layout used by ``yuv_to_rgb``,
        this will misbehave — confirm the caller's layout.
        """
        im_device = image.device
        image_np = image.data.cpu().numpy()
        image_np = cv2.cvtColor(image_np, cv2.COLOR_YUV2RGB)
        # Move the result back to the original device.
        return torch.tensor(image_np).to(im_device)
def rgb_to_yuv(self, image: torch.Tensor) -> torch.Tensor:
r"""Convert an RGB image to YUV.
The image data is assumed to be in the range of (0, 1).
Args:
image (torch.Tensor): RGB Image to be converted to YUV with shape :math:`(*, 3, H, W)`.
Returns:
torch.Tensor: YUV version of the image with shape :math:`(*, 3, H, W)`.
Example:
>>> input = torch.rand(2, 3, 4, 5)
>>> output = rgb_to_yuv(input) # 2x3x4x5
"""
if not isinstance(image, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(image)))
if len(image.shape) < 3 or image.shape[-3] != 3:
raise ValueError("Input size must have a shape of (*, 3, H, W). Got {}"
.format(image.shape))
r: torch.Tensor = image[..., 0, :, :]
g: torch.Tensor = image[..., 1, :, :]
b: torch.Tensor = image[..., 2, :, :]
y: torch.Tensor = 0.299 * r + 0.587 * g + 0.114 * b
u: torch.Tensor = -0.147 * r - 0.289 * g + 0.436 * b
v: torch.Tensor = 0.615 * r - 0.515 * g - 0.100 * b
out: torch.Tensor = torch.stack([y, u, v], -3)
return out
def model_inference(self, images_lr, images_hr, model_list, aster, i, stroke_rec, label_strs, data_name):
ret_dict = {}
ret_dict["label_vecs"] = None
ret_dict["duration"] = 0
if self.args.arch == "tsrn":
before = time.time()
images_sr = model_list[0](images_lr)
after = time.time()
ret_dict["duration"] += (after - before)
if vis_feature:
# [N, C, H, W] -> [N, C, W]
block_feature = model_list[-1].block["7"].mean(2)
elif self.args.arch in ["tsrn_tl", "tsrn_tl_wmask"]:
###############################################
aster_dict_hr = self.parse_crnn_data(images_lr[:, :3, :, :])
label_vecs = aster[1](aster_dict_hr)
label_vecs = torch.nn.functional.softmax(label_vecs, -1)
ret_dict["label_vecs"] = label_vecs
'''
##############
# val: [T, B] <- [T, B, C]
label_val, label_indices = torch.max(label_vecs, -1)
label_indices = label_indices.view(label_indices.shape[0], label_indices.shape[1], 1)
new_label_vecs = torch.zeros(label_vecs.shape).float().to(label_vecs.device)
new_label_vecs.scatter_(2, label_indices, 1)
# label_vecs[label_vecs > 0.5] = 1.
noise = (torch.rand(label_vecs.shape) - 0.5) * 0.2
label_vecs = new_label_vecs.to(label_vecs.device) + noise.to(label_vecs.device)
##############
'''
# [T, B, C] -> [B, T, C] -> [B, 1, T, C]
label_vecs = label_vecs.permute(1, 0, 2).unsqueeze(1).permute(0, 3, 1, 2)
###############################################
images_sr = model_list[0](images_lr, label_vecs)
elif self.args.arch in ABLATION_SET:
cascade_images = images_lr
images_sr = []
if vis:
aster_dict_hr = self.parse_crnn_data(
images_lr[:, :3, :, :] if not self.args.y_domain else images_lrraw[:, :3, :, :])
# print("aster_dict_hr:", aster_dict_hr.shape)
label_vecs_lr = aster[0]['model'](aster_dict_hr)
label_vecs_lr = torch.nn.functional.softmax(label_vecs_lr, -1)
aster_dict_hr = self.parse_crnn_data(
images_hr[:, :3, :, :] if not self.args.y_domain else images_hrraw[:, :3, :, :])
label_vecs_hr = aster[0]['model'](aster_dict_hr)
label_vecs_hr = torch.nn.functional.softmax(label_vecs_hr, -1)
label_vecs_final_hr = label_vecs_hr.permute(1, 0, 2).unsqueeze(1).permute(0, 3, 1, 2)
ret_dict["label_vecs_hr"] = label_vecs_hr
for m_iter in range(self.args.stu_iter):
if self.args.tpg_share:
tpg_pick = 0
else:
tpg_pick = m_iter
stu_model = aster[1][tpg_pick]
aster_dict_lr = self.parse_crnn_data(
cascade_images[:, :3, :, :] if not self.args.y_domain else images_lrraw[:, :3, :,
:]) # cascade_images
before = time.time()
label_vecs_logits = stu_model(aster_dict_lr)
label_vecs = torch.nn.functional.softmax(label_vecs_logits, -1)
label_vecs_final = label_vecs.permute(1, 0, 2).unsqueeze(1).permute(0, 3, 1, 2)
ret_dict["label_vecs"] = label_vecs
# if data_name=='medium':
# print('images_lr.shape: {}\nimages_hr.shape: {}\nlabel_strs.shape: {}\n'.format(images_lr,images_hr,label_strs))
sr = nn.functional.interpolate(images_lr,(self.config.TRAIN.height,self.config.TRAIN.width),mode='bicubic')
| pred_label = get_string_crnn(label_vecs, use_chinese=False) | 5 | 2023-11-20 06:34:21+00:00 | 12k |
mjavadpur/mj_ONNX_SadTalker | src/facerender/animate_onnx.py | [
{
"identifier": "HEEstimator",
"path": "src/facerender/modules/keypoint_detector.py",
"snippet": "class HEEstimator(nn.Module):\n \"\"\"\n Estimating head pose and expression.\n \"\"\"\n\n def __init__(self, block_expansion, feature_channel, num_kp, image_channel, max_features, num_bins=66, ... | import os
import cv2
import yaml
import numpy as np
import warnings
import safetensors
import safetensors.torch
import imageio
import torch
import torchvision
import webui # in webui
from skimage import img_as_ubyte
from src.facerender.modules.keypoint_detector import HEEstimator, KPDetector
from src.facerender.modules.mapping import MappingNet
from src.facerender.modules.generator import OcclusionAwareGenerator, OcclusionAwareSPADEGenerator
from src.facerender.modules.make_animation import make_animation
from pydub import AudioSegment
from src.utils.face_enhancer_deploy import enhancer_generator_with_len, enhancer_list
from src.utils.paste_pic import paste_pic
from src.utils.videoio import save_video_with_watermark | 8,630 | x_generator[k.replace('kp_extractor.', '')] = v
kp_detector.load_state_dict(x_generator)
if he_estimator is not None:
x_generator = {}
for k,v in checkpoint.items():
if 'he_estimator' in k:
x_generator[k.replace('he_estimator.', '')] = v
he_estimator.load_state_dict(x_generator)
return None
def load_cpk_facevid2vid(self, checkpoint_path, generator=None, discriminator=None,
kp_detector=None, he_estimator=None, optimizer_generator=None,
optimizer_discriminator=None, optimizer_kp_detector=None,
optimizer_he_estimator=None, device="cpu"):
checkpoint = torch.load(checkpoint_path, map_location=torch.device(device))
if generator is not None:
generator.load_state_dict(checkpoint['generator'])
if kp_detector is not None:
kp_detector.load_state_dict(checkpoint['kp_detector'])
if he_estimator is not None:
he_estimator.load_state_dict(checkpoint['he_estimator'])
if discriminator is not None:
try:
discriminator.load_state_dict(checkpoint['discriminator'])
except:
print ('No discriminator in the state-dict. Dicriminator will be randomly initialized')
if optimizer_generator is not None:
optimizer_generator.load_state_dict(checkpoint['optimizer_generator'])
if optimizer_discriminator is not None:
try:
optimizer_discriminator.load_state_dict(checkpoint['optimizer_discriminator'])
except RuntimeError as e:
print ('No discriminator optimizer in the state-dict. Optimizer will be not initialized')
if optimizer_kp_detector is not None:
optimizer_kp_detector.load_state_dict(checkpoint['optimizer_kp_detector'])
if optimizer_he_estimator is not None:
optimizer_he_estimator.load_state_dict(checkpoint['optimizer_he_estimator'])
return checkpoint['epoch']
def load_cpk_mapping(self, checkpoint_path, mapping=None, discriminator=None,
optimizer_mapping=None, optimizer_discriminator=None, device='cpu'):
checkpoint = torch.load(checkpoint_path, map_location=torch.device(device))
if mapping is not None:
mapping.load_state_dict(checkpoint['mapping'])
if discriminator is not None:
discriminator.load_state_dict(checkpoint['discriminator'])
if optimizer_mapping is not None:
optimizer_mapping.load_state_dict(checkpoint['optimizer_mapping'])
if optimizer_discriminator is not None:
optimizer_discriminator.load_state_dict(checkpoint['optimizer_discriminator'])
return checkpoint['epoch']
def generate(self, x, video_save_dir, pic_path, crop_info, enhancer=None, background_enhancer=None, preprocess='crop', img_size=256):
source_image=x['source_image'].type(torch.FloatTensor)
source_semantics=x['source_semantics'].type(torch.FloatTensor)
target_semantics=x['target_semantics_list'].type(torch.FloatTensor)
source_image=source_image.to(self.device)
source_semantics=source_semantics.to(self.device)
target_semantics=target_semantics.to(self.device)
if 'yaw_c_seq' in x:
yaw_c_seq = x['yaw_c_seq'].type(torch.FloatTensor)
yaw_c_seq = x['yaw_c_seq'].to(self.device)
else:
yaw_c_seq = None
if 'pitch_c_seq' in x:
pitch_c_seq = x['pitch_c_seq'].type(torch.FloatTensor)
pitch_c_seq = x['pitch_c_seq'].to(self.device)
else:
pitch_c_seq = None
if 'roll_c_seq' in x:
roll_c_seq = x['roll_c_seq'].type(torch.FloatTensor)
roll_c_seq = x['roll_c_seq'].to(self.device)
else:
roll_c_seq = None
frame_num = x['frame_num']
predictions_video = make_animation(source_image, source_semantics, target_semantics,
self.generator, self.kp_extractor, self.he_estimator, self.mapping,
yaw_c_seq, pitch_c_seq, roll_c_seq, use_exp = True)
predictions_video = predictions_video.reshape((-1,)+predictions_video.shape[2:])
predictions_video = predictions_video[:frame_num]
video = []
for idx in range(predictions_video.shape[0]):
image = predictions_video[idx]
image = np.transpose(image.data.cpu().numpy(), [1, 2, 0]).astype(np.float32)
video.append(image)
result = img_as_ubyte(video)
### the generated video is 256x256, so we keep the aspect ratio,
original_size = crop_info[0]
if original_size:
result = [ cv2.resize(result_i,(img_size, int(img_size * original_size[1]/original_size[0]) )) for result_i in result ]
video_name = x['video_name'] + '.mp4'
path = os.path.join(video_save_dir, 'temp_'+video_name)
imageio.mimsave(path, result, fps=float(25))
av_path = os.path.join(video_save_dir, video_name)
return_path = av_path
audio_path = x['audio_path']
audio_name = os.path.splitext(os.path.split(audio_path)[-1])[0]
new_audio_path = os.path.join(video_save_dir, audio_name+'.wav')
start_time = 0
# cog will not keep the .mp3 filename
sound = AudioSegment.from_file(audio_path)
frames = frame_num
end_time = start_time + frames*1/25*1000
word1=sound.set_frame_rate(16000)
word = word1[start_time:end_time]
word.export(new_audio_path, format="wav")
warnings.filterwarnings('ignore')  # silence noisy third-party warnings
# Flag whether we are running inside the stable-diffusion-webui extension.
# NOTE(review): the guarded operation is the `import webui` in the module's
# import block; as rendered here the try body itself cannot fail — confirm
# the import actually sits inside this try in the shipped file.
try:
    in_webui = True
except:
    in_webui = False
class AnimateFromCoeff():
    def __init__(self, sadtalker_path, device):
        """Build the face-render pipeline and freeze it for inference.

        Instantiates the generator, keypoint detector, head-pose estimator
        and mapping network from the facerender YAML config, loads their
        checkpoints, moves them to *device*, disables gradients and switches
        every sub-model to eval mode.

        Args:
            sadtalker_path: dict of paths; must contain 'facerender_yaml'
                and 'mappingnet_checkpoint', plus either 'checkpoint'
                (safetensors) or 'free_view_checkpoint' (.pth).
            device: torch device the sub-models are moved to.

        Raises:
            AttributeError: if a required checkpoint path is missing.
        """
        with open(sadtalker_path['facerender_yaml']) as f:
            config = yaml.safe_load(f)
        # Instantiate the four sub-networks from the YAML hyper-parameters.
        generator = OcclusionAwareSPADEGenerator(**config['model_params']['generator_params'],
                                                    **config['model_params']['common_params'])
        kp_extractor = KPDetector(**config['model_params']['kp_detector_params'],
                                    **config['model_params']['common_params'])
        he_estimator = HEEstimator(**config['model_params']['he_estimator_params'],
                               **config['model_params']['common_params'])
        mapping = MappingNet(**config['model_params']['mapping_params'])
        generator.to(device)
        kp_extractor.to(device)
        he_estimator.to(device)
        mapping.to(device)
        # Inference only: freeze every parameter.
        for param in generator.parameters():
            param.requires_grad = False
        for param in kp_extractor.parameters():
            param.requires_grad = False 
        for param in he_estimator.parameters():
            param.requires_grad = False
        for param in mapping.parameters():
            param.requires_grad = False
        # Safetensors checkpoints carry no head-pose estimator weights;
        # legacy .pth checkpoints load all three sub-models.
        if sadtalker_path is not None:
            if 'checkpoint' in sadtalker_path: # use safe tensor
                self.load_cpk_facevid2vid_safetensor(sadtalker_path['checkpoint'], kp_detector=kp_extractor, generator=generator, he_estimator=None)
            else:
                self.load_cpk_facevid2vid(sadtalker_path['free_view_checkpoint'], kp_detector=kp_extractor, generator=generator, he_estimator=he_estimator)
        else:
            raise AttributeError("Checkpoint should be specified for video head pose estimator.")
        if  sadtalker_path['mappingnet_checkpoint'] is not None:
            self.load_cpk_mapping(sadtalker_path['mappingnet_checkpoint'], mapping=mapping)
        else:
            raise AttributeError("Checkpoint should be specified for video head pose estimator.") 
        self.kp_extractor = kp_extractor
        self.generator = generator
        self.he_estimator = he_estimator
        self.mapping = mapping
        # Eval mode: disable dropout/batch-norm updates.
        self.kp_extractor.eval()
        self.generator.eval()
        self.he_estimator.eval()
        self.mapping.eval()
        self.device = device
def load_cpk_facevid2vid_safetensor(self, checkpoint_path, generator=None,
kp_detector=None, he_estimator=None,
device="cpu"):
checkpoint = safetensors.torch.load_file(checkpoint_path)
if generator is not None:
x_generator = {}
for k,v in checkpoint.items():
if 'generator' in k:
x_generator[k.replace('generator.', '')] = v
generator.load_state_dict(x_generator)
if kp_detector is not None:
x_generator = {}
for k,v in checkpoint.items():
if 'kp_extractor' in k:
x_generator[k.replace('kp_extractor.', '')] = v
kp_detector.load_state_dict(x_generator)
if he_estimator is not None:
x_generator = {}
for k,v in checkpoint.items():
if 'he_estimator' in k:
x_generator[k.replace('he_estimator.', '')] = v
he_estimator.load_state_dict(x_generator)
return None
def load_cpk_facevid2vid(self, checkpoint_path, generator=None, discriminator=None,
kp_detector=None, he_estimator=None, optimizer_generator=None,
optimizer_discriminator=None, optimizer_kp_detector=None,
optimizer_he_estimator=None, device="cpu"):
checkpoint = torch.load(checkpoint_path, map_location=torch.device(device))
if generator is not None:
generator.load_state_dict(checkpoint['generator'])
if kp_detector is not None:
kp_detector.load_state_dict(checkpoint['kp_detector'])
if he_estimator is not None:
he_estimator.load_state_dict(checkpoint['he_estimator'])
if discriminator is not None:
try:
discriminator.load_state_dict(checkpoint['discriminator'])
except:
print ('No discriminator in the state-dict. Dicriminator will be randomly initialized')
if optimizer_generator is not None:
optimizer_generator.load_state_dict(checkpoint['optimizer_generator'])
if optimizer_discriminator is not None:
try:
optimizer_discriminator.load_state_dict(checkpoint['optimizer_discriminator'])
except RuntimeError as e:
print ('No discriminator optimizer in the state-dict. Optimizer will be not initialized')
if optimizer_kp_detector is not None:
optimizer_kp_detector.load_state_dict(checkpoint['optimizer_kp_detector'])
if optimizer_he_estimator is not None:
optimizer_he_estimator.load_state_dict(checkpoint['optimizer_he_estimator'])
return checkpoint['epoch']
def load_cpk_mapping(self, checkpoint_path, mapping=None, discriminator=None,
optimizer_mapping=None, optimizer_discriminator=None, device='cpu'):
checkpoint = torch.load(checkpoint_path, map_location=torch.device(device))
if mapping is not None:
mapping.load_state_dict(checkpoint['mapping'])
if discriminator is not None:
discriminator.load_state_dict(checkpoint['discriminator'])
if optimizer_mapping is not None:
optimizer_mapping.load_state_dict(checkpoint['optimizer_mapping'])
if optimizer_discriminator is not None:
optimizer_discriminator.load_state_dict(checkpoint['optimizer_discriminator'])
return checkpoint['epoch']
def generate(self, x, video_save_dir, pic_path, crop_info, enhancer=None, background_enhancer=None, preprocess='crop', img_size=256):
source_image=x['source_image'].type(torch.FloatTensor)
source_semantics=x['source_semantics'].type(torch.FloatTensor)
target_semantics=x['target_semantics_list'].type(torch.FloatTensor)
source_image=source_image.to(self.device)
source_semantics=source_semantics.to(self.device)
target_semantics=target_semantics.to(self.device)
if 'yaw_c_seq' in x:
yaw_c_seq = x['yaw_c_seq'].type(torch.FloatTensor)
yaw_c_seq = x['yaw_c_seq'].to(self.device)
else:
yaw_c_seq = None
if 'pitch_c_seq' in x:
pitch_c_seq = x['pitch_c_seq'].type(torch.FloatTensor)
pitch_c_seq = x['pitch_c_seq'].to(self.device)
else:
pitch_c_seq = None
if 'roll_c_seq' in x:
roll_c_seq = x['roll_c_seq'].type(torch.FloatTensor)
roll_c_seq = x['roll_c_seq'].to(self.device)
else:
roll_c_seq = None
frame_num = x['frame_num']
predictions_video = make_animation(source_image, source_semantics, target_semantics,
self.generator, self.kp_extractor, self.he_estimator, self.mapping,
yaw_c_seq, pitch_c_seq, roll_c_seq, use_exp = True)
predictions_video = predictions_video.reshape((-1,)+predictions_video.shape[2:])
predictions_video = predictions_video[:frame_num]
video = []
for idx in range(predictions_video.shape[0]):
image = predictions_video[idx]
image = np.transpose(image.data.cpu().numpy(), [1, 2, 0]).astype(np.float32)
video.append(image)
result = img_as_ubyte(video)
### the generated video is 256x256, so we keep the aspect ratio,
original_size = crop_info[0]
if original_size:
result = [ cv2.resize(result_i,(img_size, int(img_size * original_size[1]/original_size[0]) )) for result_i in result ]
video_name = x['video_name'] + '.mp4'
path = os.path.join(video_save_dir, 'temp_'+video_name)
imageio.mimsave(path, result, fps=float(25))
av_path = os.path.join(video_save_dir, video_name)
return_path = av_path
audio_path = x['audio_path']
audio_name = os.path.splitext(os.path.split(audio_path)[-1])[0]
new_audio_path = os.path.join(video_save_dir, audio_name+'.wav')
start_time = 0
# cog will not keep the .mp3 filename
sound = AudioSegment.from_file(audio_path)
frames = frame_num
end_time = start_time + frames*1/25*1000
word1=sound.set_frame_rate(16000)
word = word1[start_time:end_time]
word.export(new_audio_path, format="wav")
| save_video_with_watermark(path, new_audio_path, av_path, watermark= False) | 9 | 2023-11-25 06:53:12+00:00 | 12k |
microsoft/Project-BayesDAG | src/causica/models/model.py | [
{
"identifier": "Variables",
"path": "src/causica/datasets/variables.py",
"snippet": "class Variables:\n \"\"\"\n This class represents any variables present in a model.\n \"\"\"\n\n def __init__(\n self,\n variables: List[Variable],\n auxiliary_variables: Optional[List[... | import os
import numpy as np
from typing import Tuple
from ..datasets.variables import Variables
from ..utils.helper_functions import write_git_info
from .imodel import IModel | 8,707 | # This is required in python 3 to allow return types of the same class.
from __future__ import annotations
class Model(IModel):
"""
Abstract base model class.
"""
def __init__(self, model_id: str, variables: Variables, save_dir: str) -> None:
"""
Args:
model_id: Unique model ID for referencing this model instance.
variables: Information about variables/features used by this model.
save_dir: Location to save any information about this model, including training data.
It will be created if it doesn't exist.
"""
super().__init__(model_id, variables, save_dir)
os.makedirs(self.save_dir, exist_ok=True)
try:
| # This is required in python 3 to allow return types of the same class.
from __future__ import annotations
class Model(IModel):
"""
Abstract base model class.
"""
def __init__(self, model_id: str, variables: Variables, save_dir: str) -> None:
"""
Args:
model_id: Unique model ID for referencing this model instance.
variables: Information about variables/features used by this model.
save_dir: Location to save any information about this model, including training data.
It will be created if it doesn't exist.
"""
super().__init__(model_id, variables, save_dir)
os.makedirs(self.save_dir, exist_ok=True)
try: | write_git_info(self.save_dir) | 1 | 2023-11-21 12:55:08+00:00 | 12k |
camenduru/Video-LLaVA-hf | llava/model/multimodal_encoder/languagebind/depth/modeling_depth.py | [
{
"identifier": "LanguageBindDepthConfig",
"path": "llava/model/multimodal_encoder/languagebind/depth/configuration_depth.py",
"snippet": "class LanguageBindDepthConfig(PretrainedConfig):\n r\"\"\"\n [`CLIPConfig`] is the configuration class to store the configuration of a [`CLIPModel`]. It is use... | import math
import torch
from typing import Optional, Tuple, Union
from einops import rearrange
from peft import LoraConfig, get_peft_model
from torch import nn
from torch.nn import functional as F
from transformers import PreTrainedModel, add_start_docstrings
from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from transformers.models.clip.modeling_clip import CLIPMLP, CLIPAttention, CLIPTextEmbeddings, CLIPVisionEmbeddings, \
CLIPVisionModelWithProjection, CLIPTextModelWithProjection, _expand_mask, CLIPOutput, clip_loss
from transformers.utils import add_start_docstrings_to_model_forward, replace_return_docstrings
from .configuration_depth import LanguageBindDepthConfig, CLIPVisionConfig, CLIPTextConfig | 7,874 | Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`CLIPEncoderLayer`].
Args:
config: CLIPConfig
"""
def __init__(self, config: LanguageBindDepthConfig):
super().__init__()
self.config = config
self.layers = nn.ModuleList([CLIPEncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(
self,
inputs_embeds,
attention_mask: Optional[torch.Tensor] = None,
causal_attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutput]:
r"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Causal mask for the text model. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
hidden_states = inputs_embeds
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if self.gradient_checkpointing and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(encoder_layer),
hidden_states,
attention_mask,
causal_attention_mask,
)
else:
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
causal_attention_mask,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(
    input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
):
    """
    Build the additive causal attention mask (shape ``(bsz, 1, tgt_len,
    tgt_len + past_key_values_length)``): 0 where attention is allowed,
    ``finfo(dtype).min`` where it is blocked.
    """
    bsz, tgt_len = input_ids_shape
    # Start fully blocked: a square matrix of the most negative finite value.
    mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
    mask_cond = torch.arange(mask.size(-1), device=device)
    # Zero the lower triangle including the diagonal: position i may attend
    # to every position j <= i.
    mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
    mask = mask.to(dtype)
    if past_key_values_length > 0:
        # Cached past positions are always visible -> prepend zero columns.
        mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
    return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
class CLIPTextTransformer(nn.Module):
|
class PatchDropout(nn.Module):
    """
    Randomly keep a subset of patch tokens during training (PatchDropout,
    https://arxiv.org/abs/2212.00794). At eval time, or with prob == 0,
    the input passes through unchanged.
    """
    def __init__(self, prob, exclude_first_token=True):
        super().__init__()
        # prob is the fraction of tokens to DROP; must be in [0, 1).
        assert 0 <= prob < 1.
        self.prob = prob
        self.exclude_first_token = exclude_first_token # exclude CLS token
    def forward(self, x, B, T):
        # x: token sequence; assumes shape (B*T, num_tokens, dim) when T > 1
        # and (batch, num_tokens, dim) when T == 1 — TODO confirm with callers.
        if not self.training or self.prob == 0.:
            return x
        if self.exclude_first_token:
            # Split off the CLS token so it is never dropped.
            cls_tokens, x = x[:, :1], x[:, 1:]
        else:
            cls_tokens = torch.jit.annotate(torch.Tensor, x[:, :1])
        batch = x.size()[0]
        num_tokens = x.size()[1]
        batch_indices = torch.arange(batch)
        batch_indices = batch_indices[..., None]
        keep_prob = 1 - self.prob
        # Always keep at least one token.
        num_patches_keep = max(1, int(num_tokens * keep_prob))
        if T == 1:
            # Independent random selection per sample.
            rand = torch.randn(batch, num_tokens)
            patch_indices_keep = rand.topk(num_patches_keep, dim=-1).indices
        else:
            # One selection per video, replicated across its T frames so every
            # frame keeps the same spatial patches.
            rand = torch.randn(B, num_tokens)
            patch_indices_keep = rand.topk(num_patches_keep, dim=-1).indices
            patch_indices_keep = patch_indices_keep.unsqueeze(1).repeat(1, T, 1)
            patch_indices_keep = rearrange(patch_indices_keep, 'b t n -> (b t) n')
        x = x[batch_indices, patch_indices_keep]
        if self.exclude_first_token:
            x = torch.cat((cls_tokens, x), dim=1)
        return x
class CLIPEncoderLayer(nn.Module):
    """CLIP transformer block, optionally extended with a temporal attention
    sub-block (enabled via ``config.add_time_attn``) that attends across the
    ``config.num_frames`` time steps before the usual spatial attention."""
    def __init__(self, config: LanguageBindDepthConfig):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.self_attn = CLIPAttention(config)
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = CLIPMLP(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.add_time_attn = config.add_time_attn
        if self.add_time_attn:
            self.t = config.num_frames
            # Learnable per-frame positional embedding for the temporal axis.
            self.temporal_embedding = nn.Parameter(torch.zeros(1, config.num_frames, config.hidden_size))
            nn.init.normal_(self.temporal_embedding, std=config.hidden_size ** -0.5)
            self.embed_dim = config.hidden_size
            self.temporal_attn = CLIPAttention(config)
            self.temporal_layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
            self.temporal_mlp = CLIPMLP(config)
            self.temporal_layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        causal_attention_mask: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
                (with time attention enabled, batch is assumed to be B*T — TODO confirm)
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
                `(config.encoder_attention_heads,)`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        if self.add_time_attn:
            bt, n, d = hidden_states.shape
            t = self.t

            # time embed
            if t != 1:
                # Fold space into batch and expose time as the sequence axis,
                # add the temporal positional embedding, then fold back.
                n = hidden_states.shape[1]
                hidden_states = rearrange(hidden_states, '(b t) n d -> (b n) t d', t=t)
                hidden_states = hidden_states + self.temporal_embedding[:, :t, :]
                hidden_states = rearrange(hidden_states, '(b n) t d -> (b t) n d', n=n)

            # time attn
            residual = hidden_states
            hidden_states = rearrange(hidden_states, '(b t) n d -> (b n) t d', t=t)
            # hidden_states = self.layer_norm1(hidden_states) # share layernorm
            hidden_states = self.temporal_layer_norm1(hidden_states)
            hidden_states, attn_weights = self.temporal_attn(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                causal_attention_mask=causal_attention_mask,
                output_attentions=output_attentions,
            )
            hidden_states = residual + rearrange(hidden_states, '(b n) t d -> (b t) n d', n=n)

            residual = hidden_states
            hidden_states = rearrange(hidden_states, '(b t) n d -> (b n) t d', t=t)
            # hidden_states = self.layer_norm2(hidden_states) # share layernorm
            hidden_states = self.temporal_layer_norm2(hidden_states)
            hidden_states = self.temporal_mlp(hidden_states)
            hidden_states = residual + rearrange(hidden_states, '(b n) t d -> (b t) n d', n=n)

        # spatial attn
        residual = hidden_states

        hidden_states = self.layer_norm1(hidden_states)
        hidden_states, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
        )
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            # NOTE: with time attention enabled, attn_weights here are from the
            # spatial attention only (temporal weights are overwritten).
            outputs += (attn_weights,)

        return outputs
class CLIPPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = LanguageBindDepthConfig
    base_model_prefix = "clip"
    supports_gradient_checkpointing = True
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def _init_weights(self, module):
        """Initialize the weights (CLIP-style: scaled normal init per module type)."""
        factor = self.config.initializer_factor
        if isinstance(module, CLIPTextEmbeddings):
            module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
            module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
        elif isinstance(module, CLIPVisionEmbeddings):
            factor = self.config.initializer_factor
            nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor)
            nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor)
            nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor)
        elif isinstance(module, CLIPAttention):
            factor = self.config.initializer_factor
            # Std scaled down with depth (GPT-2 style residual scaling).
            in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
            out_proj_std = (module.embed_dim**-0.5) * factor
            nn.init.normal_(module.q_proj.weight, std=in_proj_std)
            nn.init.normal_(module.k_proj.weight, std=in_proj_std)
            nn.init.normal_(module.v_proj.weight, std=in_proj_std)
            nn.init.normal_(module.out_proj.weight, std=out_proj_std)
        elif isinstance(module, CLIPMLP):
            factor = self.config.initializer_factor
            in_proj_std = (
                (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
            )
            fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
            nn.init.normal_(module.fc1.weight, std=fc_std)
            nn.init.normal_(module.fc2.weight, std=in_proj_std)
        elif isinstance(module, LanguageBindDepth):
            nn.init.normal_(
                module.text_projection.weight,
                std=module.text_embed_dim**-0.5 * self.config.initializer_factor,
            )
            nn.init.normal_(
                module.visual_projection.weight,
                std=module.vision_embed_dim**-0.5 * self.config.initializer_factor,
            )
        elif isinstance(module, CLIPVisionModelWithProjection):
            nn.init.normal_(
                module.visual_projection.weight,
                std=self.config.hidden_size**-0.5 * self.config.initializer_factor,
            )
        elif isinstance(module, CLIPTextModelWithProjection):
            nn.init.normal_(
                module.text_projection.weight,
                std=self.config.hidden_size**-0.5 * self.config.initializer_factor,
            )

        # LayerNorm / Linear biases are always reset regardless of module type.
        if isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()

    def _set_gradient_checkpointing(self, module, value=False):
        # Gradient checkpointing is toggled on the encoder stack only.
        if isinstance(module, CLIPEncoder):
            module.gradient_checkpointing = value
CLIP_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`CLIPConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
CLIP_TEXT_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
CLIP_VISION_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
CLIP_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
return_loss (`bool`, *optional*):
Whether or not to return the contrastive loss.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
class CLIPEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`CLIPEncoderLayer`].

    Args:
        config: CLIPConfig
    """

    def __init__(self, config: LanguageBindDepthConfig):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([CLIPEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        # Set via CLIPPreTrainedModel._set_gradient_checkpointing.
        self.gradient_checkpointing = False

    def forward(
        self,
        inputs_embeds,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Causal mask for the text model. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        # Per-call flags fall back to the model config.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_states = inputs_embeds
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:

                def create_custom_forward(module):
                    # Closure so checkpoint() can re-run the layer with the
                    # non-tensor output_attentions flag baked in.
                    def custom_forward(*inputs):
                        return module(*inputs, output_attentions)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(encoder_layer),
                    hidden_states,
                    attention_mask,
                    causal_attention_mask,
                )
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                    attention_mask,
                    causal_attention_mask,
                    output_attentions=output_attentions,
                )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            # Append the final layer's output as the last hidden state.
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(
input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
):
"""
Make causal mask used for bi-directional self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
mask_cond = torch.arange(mask.size(-1), device=device)
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
class CLIPTextTransformer(nn.Module): | def __init__(self, config: CLIPTextConfig): | 2 | 2023-11-21 14:33:54+00:00 | 12k |
Sysys242/DMDPY | modules/features.py | [
{
"identifier": "scrapping_task",
"path": "logics/member_scrapper.py",
"snippet": "def scrapping_task():\n token = logger.delay_input('Please enter the token to scrap on: ')\n gId = logger.delay_input('Please enter the guild id to scrap on: ')\n cId = logger.delay_input('Please enter the channe... | from logics.member_scrapper import scrapping_task
from logics.display_changer import display_task
from logics.avatar_changer import avatar_task
from logics.token_checker import checking_task
from logics.fast_friender import fast_friending_task
from logics.server_joiner import joining_task
from logics.tos_accepter import tos_task
from logics.bio_changer import bio_task
from logics.id_to_user import itu_task
from logics.friender import friending_task
from logics.mass_dm import dming_task | 9,502 |
def soon():
    # Placeholder callback for menu entries whose feature is not implemented yet.
    print("Soon...")
features = {
'Friender': friending_task,
'Fast Friender': fast_friending_task,
'Joiner': joining_task,
'Mass Dm': dming_task,
'Member Scapper': scrapping_task,
|
def soon():
    """Placeholder for features that are not implemented yet."""
    message = "Soon..."
    print(message)
features = {
'Friender': friending_task,
'Fast Friender': fast_friending_task,
'Joiner': joining_task,
'Mass Dm': dming_task,
'Member Scapper': scrapping_task,
| 'Tos Accepter': tos_task, | 6 | 2023-11-19 10:02:14+00:00 | 12k |
ymp5078/AI-SAM | segment_anything/automatic_mask_generator.py | [
{
"identifier": "Sam",
"path": "segment_anything/modeling/sam.py",
"snippet": "class Sam(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = \"RGB\"\n\n def __init__(\n self,\n image_encoder: ImageEncoderViT,\n prompt_encoder: PromptEncoder,\n mask_decode... | import numpy as np
import torch
import cv2 # type: ignore # noqa: F401
from torchvision.ops.boxes import batched_nms, box_area # type: ignore
from typing import Any, Dict, List, Optional, Tuple
from .modeling import Sam
from .predictor import SamPredictor
from .utils.amg import (
MaskData,
area_from_rle,
batch_iterator,
batched_mask_to_box,
box_xyxy_to_xywh,
build_all_layer_point_grids,
calculate_stability_score,
coco_encode_rle,
generate_crop_boxes,
is_box_near_crop_edge,
mask_to_rle_pytorch,
remove_small_regions,
rle_to_mask,
uncrop_boxes_xyxy,
uncrop_masks,
uncrop_points,
)
from pycocotools import mask as mask_utils # type: ignore # noqa: F401 | 9,871 | Using a SAM model, generates masks for the entire image.
Generates a grid of point prompts over the image, then filters
low quality and duplicate masks. The default settings are chosen
for SAM with a ViT-H backbone.
Arguments:
model (Sam): The SAM model to use for mask prediction.
points_per_side (int or None): The number of points to be sampled
along one side of the image. The total number of points is
points_per_side**2. If None, 'point_grids' must provide explicit
point sampling.
points_per_batch (int): Sets the number of points run simultaneously
by the model. Higher numbers may be faster but use more GPU memory.
pred_iou_thresh (float): A filtering threshold in [0,1], using the
model's predicted mask quality.
stability_score_thresh (float): A filtering threshold in [0,1], using
the stability of the mask under changes to the cutoff used to binarize
the model's mask predictions.
stability_score_offset (float): The amount to shift the cutoff when
calculated the stability score.
box_nms_thresh (float): The box IoU cutoff used by non-maximal
suppression to filter duplicate masks.
crop_n_layers (int): If >0, mask prediction will be run again on
crops of the image. Sets the number of layers to run, where each
layer has 2**i_layer number of image crops.
crop_nms_thresh (float): The box IoU cutoff used by non-maximal
suppression to filter duplicate masks between different crops.
crop_overlap_ratio (float): Sets the degree to which crops overlap.
In the first crop layer, crops will overlap by this fraction of
the image length. Later layers with more crops scale down this overlap.
crop_n_points_downscale_factor (int): The number of points-per-side
sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
point_grids (list(np.ndarray) or None): A list over explicit grids
of points used for sampling, normalized to [0,1]. The nth grid in the
list is used in the nth crop layer. Exclusive with points_per_side.
min_mask_region_area (int): If >0, postprocessing will be applied
to remove disconnected regions and holes in masks with area smaller
than min_mask_region_area. Requires opencv.
output_mode (str): The form masks are returned in. Can be 'binary_mask',
'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools.
For large resolutions, 'binary_mask' may consume large amounts of
memory.
"""
assert (points_per_side is None) != (
point_grids is None
), "Exactly one of points_per_side or point_grid must be provided."
if points_per_side is not None:
self.point_grids = build_all_layer_point_grids(
points_per_side,
crop_n_layers,
crop_n_points_downscale_factor,
)
elif point_grids is not None:
self.point_grids = point_grids
else:
raise ValueError("Can't have both points_per_side and point_grid be None.")
assert output_mode in [
"binary_mask",
"uncompressed_rle",
"coco_rle",
], f"Unknown output_mode {output_mode}."
if output_mode == "coco_rle":
if min_mask_region_area > 0:
self.predictor = SamPredictor(model)
self.points_per_batch = points_per_batch
self.pred_iou_thresh = pred_iou_thresh
self.stability_score_thresh = stability_score_thresh
self.stability_score_offset = stability_score_offset
self.box_nms_thresh = box_nms_thresh
self.crop_n_layers = crop_n_layers
self.crop_nms_thresh = crop_nms_thresh
self.crop_overlap_ratio = crop_overlap_ratio
self.crop_n_points_downscale_factor = crop_n_points_downscale_factor
self.min_mask_region_area = min_mask_region_area
self.output_mode = output_mode
@torch.no_grad()
def generate(self, image: np.ndarray) -> List[Dict[str, Any]]:
"""
Generates masks for the given image.
Arguments:
image (np.ndarray): The image to generate masks for, in HWC uint8 format.
Returns:
list(dict(str, any)): A list over records for masks. Each record is
a dict containing the following keys:
segmentation (dict(str, any) or np.ndarray): The mask. If
output_mode='binary_mask', is an array of shape HW. Otherwise,
is a dictionary containing the RLE.
bbox (list(float)): The box around the mask, in XYWH format.
area (int): The area in pixels of the mask.
predicted_iou (float): The model's own prediction of the mask's
quality. This is filtered by the pred_iou_thresh parameter.
point_coords (list(list(float))): The point coordinates input
to the model to generate this mask.
stability_score (float): A measure of the mask's quality. This
is filtered on using the stability_score_thresh parameter.
crop_box (list(float)): The crop of the image used to generate
the mask, given in XYWH format.
"""
# Generate masks
mask_data = self._generate_masks(image)
# Filter small disconnected regions and holes in masks
if self.min_mask_region_area > 0:
mask_data = self.postprocess_small_regions(
mask_data,
self.min_mask_region_area,
max(self.box_nms_thresh, self.crop_nms_thresh),
)
# Encode masks
if self.output_mode == "coco_rle":
mask_data["segmentations"] = [
| # -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
class SamAutomaticMaskGenerator:
def __init__(
self,
model: Sam,
points_per_side: Optional[int] = 32,
points_per_batch: int = 64,
pred_iou_thresh: float = 0.88,
stability_score_thresh: float = 0.95,
stability_score_offset: float = 1.0,
box_nms_thresh: float = 0.7,
crop_n_layers: int = 0,
crop_nms_thresh: float = 0.7,
crop_overlap_ratio: float = 512 / 1500,
crop_n_points_downscale_factor: int = 1,
point_grids: Optional[List[np.ndarray]] = None,
min_mask_region_area: int = 0,
output_mode: str = "binary_mask",
) -> None:
"""
Using a SAM model, generates masks for the entire image.
Generates a grid of point prompts over the image, then filters
low quality and duplicate masks. The default settings are chosen
for SAM with a ViT-H backbone.
Arguments:
model (Sam): The SAM model to use for mask prediction.
points_per_side (int or None): The number of points to be sampled
along one side of the image. The total number of points is
points_per_side**2. If None, 'point_grids' must provide explicit
point sampling.
points_per_batch (int): Sets the number of points run simultaneously
by the model. Higher numbers may be faster but use more GPU memory.
pred_iou_thresh (float): A filtering threshold in [0,1], using the
model's predicted mask quality.
stability_score_thresh (float): A filtering threshold in [0,1], using
the stability of the mask under changes to the cutoff used to binarize
the model's mask predictions.
stability_score_offset (float): The amount to shift the cutoff when
calculated the stability score.
box_nms_thresh (float): The box IoU cutoff used by non-maximal
suppression to filter duplicate masks.
crop_n_layers (int): If >0, mask prediction will be run again on
crops of the image. Sets the number of layers to run, where each
layer has 2**i_layer number of image crops.
crop_nms_thresh (float): The box IoU cutoff used by non-maximal
suppression to filter duplicate masks between different crops.
crop_overlap_ratio (float): Sets the degree to which crops overlap.
In the first crop layer, crops will overlap by this fraction of
the image length. Later layers with more crops scale down this overlap.
crop_n_points_downscale_factor (int): The number of points-per-side
sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
point_grids (list(np.ndarray) or None): A list over explicit grids
of points used for sampling, normalized to [0,1]. The nth grid in the
list is used in the nth crop layer. Exclusive with points_per_side.
min_mask_region_area (int): If >0, postprocessing will be applied
to remove disconnected regions and holes in masks with area smaller
than min_mask_region_area. Requires opencv.
output_mode (str): The form masks are returned in. Can be 'binary_mask',
'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools.
For large resolutions, 'binary_mask' may consume large amounts of
memory.
"""
assert (points_per_side is None) != (
point_grids is None
), "Exactly one of points_per_side or point_grid must be provided."
if points_per_side is not None:
self.point_grids = build_all_layer_point_grids(
points_per_side,
crop_n_layers,
crop_n_points_downscale_factor,
)
elif point_grids is not None:
self.point_grids = point_grids
else:
raise ValueError("Can't have both points_per_side and point_grid be None.")
assert output_mode in [
"binary_mask",
"uncompressed_rle",
"coco_rle",
], f"Unknown output_mode {output_mode}."
if output_mode == "coco_rle":
if min_mask_region_area > 0:
self.predictor = SamPredictor(model)
self.points_per_batch = points_per_batch
self.pred_iou_thresh = pred_iou_thresh
self.stability_score_thresh = stability_score_thresh
self.stability_score_offset = stability_score_offset
self.box_nms_thresh = box_nms_thresh
self.crop_n_layers = crop_n_layers
self.crop_nms_thresh = crop_nms_thresh
self.crop_overlap_ratio = crop_overlap_ratio
self.crop_n_points_downscale_factor = crop_n_points_downscale_factor
self.min_mask_region_area = min_mask_region_area
self.output_mode = output_mode
@torch.no_grad()
def generate(self, image: np.ndarray) -> List[Dict[str, Any]]:
"""
Generates masks for the given image.
Arguments:
image (np.ndarray): The image to generate masks for, in HWC uint8 format.
Returns:
list(dict(str, any)): A list over records for masks. Each record is
a dict containing the following keys:
segmentation (dict(str, any) or np.ndarray): The mask. If
output_mode='binary_mask', is an array of shape HW. Otherwise,
is a dictionary containing the RLE.
bbox (list(float)): The box around the mask, in XYWH format.
area (int): The area in pixels of the mask.
predicted_iou (float): The model's own prediction of the mask's
quality. This is filtered by the pred_iou_thresh parameter.
point_coords (list(list(float))): The point coordinates input
to the model to generate this mask.
stability_score (float): A measure of the mask's quality. This
is filtered on using the stability_score_thresh parameter.
crop_box (list(float)): The crop of the image used to generate
the mask, given in XYWH format.
"""
# Generate masks
mask_data = self._generate_masks(image)
# Filter small disconnected regions and holes in masks
if self.min_mask_region_area > 0:
mask_data = self.postprocess_small_regions(
mask_data,
self.min_mask_region_area,
max(self.box_nms_thresh, self.crop_nms_thresh),
)
# Encode masks
if self.output_mode == "coco_rle":
mask_data["segmentations"] = [ | coco_encode_rle(rle) for rle in mask_data["rles"] | 9 | 2023-11-26 23:42:53+00:00 | 12k |
sophiaalthammer/alforrankers | matchmaker/train.py | [
{
"identifier": "DynamicTeacher",
"path": "matchmaker/distillation/dynamic_teacher.py",
"snippet": "class DynamicTeacher():\n '''\n Wraps a trained model checkpoint and the training batch queue to score (inference only) samples from the batch\n '''\n\n def __init__(self,\n co... | from typing import Dict, Tuple, List
from contextlib import nullcontext
from transformers import logging
from allennlp.common import Params, Tqdm
from torch.optim import *
from torch.optim.lr_scheduler import *
from torch import nn as nn
from allennlp.nn.util import move_to_device
from matchmaker.utils.utils import *
from matchmaker.utils.config import *
from matchmaker.distillation.dynamic_teacher import DynamicTeacher
from matchmaker.utils.running_average import RunningAverage
from matchmaker.models.all import get_model, get_word_embedder, build_model
from matchmaker.losses.all import get_loss,merge_loss
from matchmaker.active_learning.generate_training_subset import *
from matchmaker.autolabel_domain.robust04_nfoldcrosstrain import create_nfold_train_test_data
from matchmaker.utils.cross_experiment_cache import *
from matchmaker.utils.input_pipeline import *
from matchmaker.utils.performance_monitor import *
from matchmaker.eval import *
from torch.utils.tensorboard import SummaryWriter
from rich.console import Console
from rich.live import Live
import os
import warnings
import gc
import time
import sys,traceback
import itertools
import torch
import torch.distributed as dist
import numpy
import random
import transformers
import sys ,subprocess | 7,212 |
console = Console()
if __name__ == "__main__":
#
# config
#
args = get_parser().parse_args()
from_scratch = True
train_mode = "Train"
if args.continue_folder:
train_mode = "Evaluate"
from_scratch = False
run_folder = args.continue_folder
config = get_config_single(os.path.join(run_folder, "config.yaml"), args.config_overwrites)
else:
if not args.run_name:
raise Exception("--run-name must be set (or continue-folder)")
config = get_config(args.config_file, args.config_overwrites)
run_folder = prepare_experiment(args, config)
logger = get_logger_to_file(run_folder, "main")
logger.info("Running: %s", str(sys.argv))
tb_writer = SummaryWriter(run_folder)
print_hello(config,run_folder,train_mode)
#
# random seeds
#
torch.manual_seed(config["random_seed"])
numpy.random.seed(config["random_seed"])
random.seed(config["random_seed"])
logger.info("Torch seed: %i ",torch.initial_seed())
# hardcode gpu usage
cuda_device = 0 # always take the first -> set others via cuda flag in bash
perf_monitor = PerformanceMonitor.get()
perf_monitor.start_block("startup")
#
# create the training subset in case of subset training
# -------------------------------
#
train_subset = config.get("train_subset", False)
train_subset_incrementally = config.get('train_subset_incrementally', False)
train_subset_warmstart = config.get('train_subset_warmstart', False)
train_subset_control_topic_no = config.get('train_subset_control_topic_no', -1)
train_subset_firstk = config.get('train_subset_firstk', False)
triplet_no_per_topic = config.get('triplet_no_per_topic', 10)
if train_subset:
if not train_subset_incrementally:
if train_subset_control_topic_no > -1:
train_file_path = generate_subset_control_topic_no(config["train_tsv"], run_folder, config['train_subset_control_topic_no'], config["random_seed"], triplet_no_per_topic)
elif train_subset_firstk:
train_file_path = generate_train_subset_from_train_firstk(config["train_tsv"], run_folder, config['train_data_size'])
else:
train_file_path = generate_train_subset_from_train(config["train_tsv"], run_folder, config['train_data_size'], config["random_seed"])
config["train_tsv"] = train_file_path
else:
if not train_subset_warmstart:
if train_subset_control_topic_no > -1:
train_file_path = generate_subset_control_incrementally(config["train_tsv"], run_folder, config['train_subset_control_topic_no'],
config["random_seed"], triplet_no_per_topic,
config['expirement_base_path'],
config['previous_exp_name'])
else:
train_file_path = generate_train_subset_incrementally(config["train_tsv"], run_folder, config['train_data_size'],
config["random_seed"], config['expirement_base_path'], config['previous_exp_name'])
else:
if train_subset_control_topic_no > -1:
train_file_path, warmstart_path = generate_subset_control_incrementally(config["train_tsv"], run_folder, config['train_subset_control_topic_no'],
config["random_seed"], triplet_no_per_topic,
config['expirement_base_path'],
config['previous_exp_name'], warmstart_model=True)
else:
train_file_path, warmstart_path = generate_train_subset_incrementally(config["train_tsv"], run_folder, config['train_data_size'],
config["random_seed"], config['expirement_base_path'], config['previous_exp_name'], warmstart_model=True)
config["warmstart_model_path"] = warmstart_path
config["train_tsv"] = train_file_path
#
# create the training nfold subset
# -------------------------------
#
train_subset_nfold = config.get("train_subset_nfold", False)
if train_subset_nfold:
if config['nfold_sampling'] == 'nfold':
print('use nfolds from {} to {}'.format(config["fold_lb"], config["fold_ub"]))
else:
print('use random sampling for fold')
# now i use the config["train_tsv"] to subsample the size!
create_nfold_train_test_data(config['collection'], config['queries'], run_folder, config['candidate_path'],
config['validation_cont']['qrels'], sampling=config['nfold_sampling'],
fold_lb=config.get("fold_lb", 0), fold_ub=config.get('fold_ub', 0),
train_size=config.get('fold_train_size', 0), index_dir=config.get('index_dir', ''),
n_samples_per_query=config.get('n_samples_per_query', -1))
config["train_tsv"] = os.path.join(run_folder, 'train_triples_nfold.tsv')
config["validation_cont"]["tsv"] = os.path.join(run_folder, 'test_nfold_queries_rerank.tsv')
config["test"]["top1000_description"]["tsv"] = os.path.join(run_folder, 'test_nfold_queries.tsv')
#
# create (and load) model instance
# -------------------------------
#
# load candidate set for efficient cs@N validation
validation_cont_candidate_set = None
if from_scratch and "candidate_set_path" in config["validation_cont"]:
validation_cont_candidate_set = parse_candidate_set(config["validation_cont"]["candidate_set_path"],config["validation_cont"]["candidate_set_from_to"][1])
word_embedder, padding_idx = get_word_embedder(config)
| #
# train a neural-ir model
# -------------------------------
os.environ['PYTHONHASHSEED'] = "42" # very important to keep set operations deterministic
os.environ["TOKENIZERS_PARALLELISM"] = "false"
os.environ["OMP_NUM_THREADS"] = "1"
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # needed because of the scann library
#try:
# from grad_cache import GradCache
# _grad_cache_available = True
#except ModuleNotFoundError:
# _grad_cache_available = False
logging.set_verbosity_warning()
sys.path.append(os.getcwd())
console = Console()
if __name__ == "__main__":
#
# config
#
args = get_parser().parse_args()
from_scratch = True
train_mode = "Train"
if args.continue_folder:
train_mode = "Evaluate"
from_scratch = False
run_folder = args.continue_folder
config = get_config_single(os.path.join(run_folder, "config.yaml"), args.config_overwrites)
else:
if not args.run_name:
raise Exception("--run-name must be set (or continue-folder)")
config = get_config(args.config_file, args.config_overwrites)
run_folder = prepare_experiment(args, config)
logger = get_logger_to_file(run_folder, "main")
logger.info("Running: %s", str(sys.argv))
tb_writer = SummaryWriter(run_folder)
print_hello(config,run_folder,train_mode)
#
# random seeds
#
torch.manual_seed(config["random_seed"])
numpy.random.seed(config["random_seed"])
random.seed(config["random_seed"])
logger.info("Torch seed: %i ",torch.initial_seed())
# hardcode gpu usage
cuda_device = 0 # always take the first -> set others via cuda flag in bash
perf_monitor = PerformanceMonitor.get()
perf_monitor.start_block("startup")
#
# create the training subset in case of subset training
# -------------------------------
#
train_subset = config.get("train_subset", False)
train_subset_incrementally = config.get('train_subset_incrementally', False)
train_subset_warmstart = config.get('train_subset_warmstart', False)
train_subset_control_topic_no = config.get('train_subset_control_topic_no', -1)
train_subset_firstk = config.get('train_subset_firstk', False)
triplet_no_per_topic = config.get('triplet_no_per_topic', 10)
if train_subset:
if not train_subset_incrementally:
if train_subset_control_topic_no > -1:
train_file_path = generate_subset_control_topic_no(config["train_tsv"], run_folder, config['train_subset_control_topic_no'], config["random_seed"], triplet_no_per_topic)
elif train_subset_firstk:
train_file_path = generate_train_subset_from_train_firstk(config["train_tsv"], run_folder, config['train_data_size'])
else:
train_file_path = generate_train_subset_from_train(config["train_tsv"], run_folder, config['train_data_size'], config["random_seed"])
config["train_tsv"] = train_file_path
else:
if not train_subset_warmstart:
if train_subset_control_topic_no > -1:
train_file_path = generate_subset_control_incrementally(config["train_tsv"], run_folder, config['train_subset_control_topic_no'],
config["random_seed"], triplet_no_per_topic,
config['expirement_base_path'],
config['previous_exp_name'])
else:
train_file_path = generate_train_subset_incrementally(config["train_tsv"], run_folder, config['train_data_size'],
config["random_seed"], config['expirement_base_path'], config['previous_exp_name'])
else:
if train_subset_control_topic_no > -1:
train_file_path, warmstart_path = generate_subset_control_incrementally(config["train_tsv"], run_folder, config['train_subset_control_topic_no'],
config["random_seed"], triplet_no_per_topic,
config['expirement_base_path'],
config['previous_exp_name'], warmstart_model=True)
else:
train_file_path, warmstart_path = generate_train_subset_incrementally(config["train_tsv"], run_folder, config['train_data_size'],
config["random_seed"], config['expirement_base_path'], config['previous_exp_name'], warmstart_model=True)
config["warmstart_model_path"] = warmstart_path
config["train_tsv"] = train_file_path
#
# create the training nfold subset
# -------------------------------
#
train_subset_nfold = config.get("train_subset_nfold", False)
if train_subset_nfold:
if config['nfold_sampling'] == 'nfold':
print('use nfolds from {} to {}'.format(config["fold_lb"], config["fold_ub"]))
else:
print('use random sampling for fold')
# now i use the config["train_tsv"] to subsample the size!
create_nfold_train_test_data(config['collection'], config['queries'], run_folder, config['candidate_path'],
config['validation_cont']['qrels'], sampling=config['nfold_sampling'],
fold_lb=config.get("fold_lb", 0), fold_ub=config.get('fold_ub', 0),
train_size=config.get('fold_train_size', 0), index_dir=config.get('index_dir', ''),
n_samples_per_query=config.get('n_samples_per_query', -1))
config["train_tsv"] = os.path.join(run_folder, 'train_triples_nfold.tsv')
config["validation_cont"]["tsv"] = os.path.join(run_folder, 'test_nfold_queries_rerank.tsv')
config["test"]["top1000_description"]["tsv"] = os.path.join(run_folder, 'test_nfold_queries.tsv')
#
# create (and load) model instance
# -------------------------------
#
# load candidate set for efficient cs@N validation
validation_cont_candidate_set = None
if from_scratch and "candidate_set_path" in config["validation_cont"]:
validation_cont_candidate_set = parse_candidate_set(config["validation_cont"]["candidate_set_path"],config["validation_cont"]["candidate_set_from_to"][1])
word_embedder, padding_idx = get_word_embedder(config) | model, encoder_type = get_model(config,word_embedder,padding_idx) | 2 | 2023-11-21 10:38:22+00:00 | 12k |
dmamakas2000/ipo | experiments/ipo.py | [
{
"identifier": "FinancialHierarchicalBert",
"path": "models/financial_features_hierarchical_bert.py",
"snippet": "class FinancialHierarchicalBert(nn.Module):\n\n def __init__(self, config, encoder, max_segments=64, max_segment_length=128, max_pooled=False):\n super(FinancialHierarchicalBert, ... | import os
import sys
import glob
import shutil
import random
import logging
import datasets
import transformers
from typing import Optional
from datasets import load_dataset
from dataclasses import dataclass, field
from models.financial_features_hierarchical_bert import FinancialHierarchicalBert, \
HierarchicalBertFinancialModelForSequenceClassification
from trainer.trainer import BinaryTrainer
from transformers.utils import check_min_version
from models.hierarchical_bert import HierarchicalBert
from transformers.utils.versions import require_version
from transformers.trainer_utils import get_last_checkpoint
from trainer.financial_features_trainer import FinancialTrainer
from functions.functions import segment_string, new_compute_metrics
from models.max_pooled_bert import BertMaxPooledForSequenceClassification
from models.financial_features_bert import BertFinancialModelForSequenceClassification
from models.max_pooled_financial_features_bert import FinancialBertMaxPooledForSequenceClassification
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
HfArgumentParser,
TrainingArguments,
default_data_collator,
set_seed,
) | 10,126 | model = HierarchicalBertFinancialModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
segment_encoder = model.bert
model_encoder = FinancialHierarchicalBert(config=config,
encoder=segment_encoder,
max_segments=data_args.max_segments,
max_segment_length=data_args.max_seg_length,
max_pooled=model_args.max_pooled)
model.bert = model_encoder
else:
"""
Scenario 6: Hierarchical-BERT.
"""
model = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
segment_encoder = model.bert
model_encoder = HierarchicalBert(encoder=segment_encoder,
max_segments=data_args.max_segments,
max_segment_length=data_args.max_seg_length,
max_pooled=model_args.max_pooled)
model.bert = model_encoder
# Padding strategy
if data_args.pad_to_max_length:
padding = "max_length"
else:
padding = False
def preprocess_function(examples):
"""
Preprocesses the examples of a specific batch.
"""
if model_args.hierarchical:
case_template = [[0] * data_args.max_seg_length]
batch = {'input_ids': [], 'attention_mask': [], 'token_type_ids': []}
for doc in examples['text']:
doc = segment_string(doc, data_args.max_seg_length)
doc_encodings = tokenizer(doc[:data_args.max_segments], padding=padding,
max_length=data_args.max_seg_length, truncation=True)
batch['input_ids'].append(doc_encodings['input_ids'] + case_template * (
data_args.max_segments - len(doc_encodings['input_ids'])))
batch['attention_mask'].append(doc_encodings['attention_mask'] + case_template * (
data_args.max_segments - len(doc_encodings['attention_mask'])))
batch['token_type_ids'].append(doc_encodings['token_type_ids'] + case_template * (
data_args.max_segments - len(doc_encodings['token_type_ids'])))
else:
# Tokenize the texts
batch = tokenizer(
examples["text"],
padding=padding,
max_length=data_args.max_seq_length,
truncation=True,
)
# batch["labels"] = [[1 if labels == label else 0 for label in label_list] for labels in examples["class"]]
batch["labels"] = [[0 if labels == label else 1 for label in label_list] for labels in examples["class"]]
if model_args.concatenate_financial_features:
batch['financial_features'] = examples['financial']
return batch
# If training, apply the preprocessing and log a few random samples
if training_args.do_train:
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
with training_args.main_process_first(desc="train dataset map pre-processing"):
train_dataset = train_dataset.map(
preprocess_function,
batched=True,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on train dataset",
)
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# If evaluating, apply the preprocessing and log a few random samples
if training_args.do_eval:
if data_args.max_eval_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
with training_args.main_process_first(desc="validation dataset map pre-processing"):
eval_dataset = eval_dataset.map(
preprocess_function,
batched=True,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on validation dataset",
)
# If predicting, apply the preprocessing and log a few random samples
if training_args.do_predict:
if data_args.max_predict_samples is not None:
predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
with training_args.main_process_first(desc="prediction dataset map pre-processing"):
predict_dataset = predict_dataset.map(
preprocess_function,
batched=True,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on prediction dataset",
)
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
data_collator = default_data_collator
elif training_args.fp16:
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
else:
data_collator = None
# Trainer
if model_args.concatenate_financial_features:
| #!/usr/bin/env python
# coding=utf-8
"""
File: ipo.py
Author: Dimitrios Mamakas (Athens University of Economics and Business)
Date: November 22, 2023
Description: Implementation of the following BERT-based and Hierarchical-BERT-based variants.
• bert-tx-cls-512
• bert-txff-cls-512
• bert-tx-max-512
• bert-txff-max-512
• hierbert-tx-cls-8192
• hierbert-txff-cls-8192
• hierbert-tx-cls-20480
• hierbert-txff-cls-20480
License:
This code is provided under the MIT License.
You are free to copy, modify, and distribute the code.
If you use this code in your research, please include a reference to the original study (please visit the home page).
"""
# Will error if the minimal version of Transformers is not installed
check_min_version("4.9.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class
into argparse arguments to be able to specify them on
the command line.
"""
max_seq_length: Optional[int] = field(
default=512,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
max_segments: Optional[int] = field(
default=64,
metadata={
"help": "The maximum number of segments (paragraphs) to be considered. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
max_seg_length: Optional[int] = field(
default=128,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
)
pad_to_max_length: bool = field(
default=True,
metadata={
"help": "Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
max_predict_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
},
)
train_dataset_dir: Optional[str] = field(
default=None,
metadata={
"help": "The directory to retrieve the training dataset from."
}
)
eval_dataset_dir: Optional[str] = field(
default=None,
metadata={
"help": "The directory to retrieve the evaluation dataset from."
}
)
test_dataset_dir: Optional[str] = field(
default=None,
metadata={
"help": "The directory to retrieve the test dataset from."
}
)
server_ip: Optional[str] = field(default=None, metadata={"help": "For distant debugging."})
server_port: Optional[str] = field(default=None, metadata={"help": "For distant debugging."})
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
do_lower_case: Optional[bool] = field(
default=True,
metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
}
)
max_pooled: bool = field(
default=False,
metadata={
"help": "Whether to use a max-pooled embedding as an input into the classification head."
"If set to False, the CLS embedding will be used to perform the classification."
}
)
hierarchical: bool = field(
default=False, metadata={"help": "Whether to use a hierarchical variant or not."}
)
concatenate_financial_features: bool = field(
default=False, metadata={"help": "Whether to concatenate the financial features among with the textual, or not."}
)
reduction_features: int = field(
default=8,
metadata={
"help": "The number of output BERT features to keep in case it is asked."
},
)
multiple_dense_layers: bool = field(
default=True,
metadata={
"help": "Whether to use a second dense layer on top of the first one (if selected), or not."
},
)
threshold: float = field(
default=0.5,
metadata={
"help": "The threshold to classify texts with."
}
)
def main():
"""
Main method.
"""
# Arguments
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Fix boolean parameter
if model_args.do_lower_case == 'False' or not model_args.do_lower_case:
model_args.do_lower_case = False
'Tokenizer do_lower_case False'
else:
model_args.do_lower_case = True
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
# Load the dataset splits
if training_args.do_train:
train_dataset = load_dataset("json", data_files=data_args.train_dataset_dir, split="train",
cache_dir=model_args.cache_dir)
if training_args.do_eval:
eval_dataset = load_dataset("json", data_files=data_args.eval_dataset_dir, split="train",
cache_dir=model_args.cache_dir)
if training_args.do_predict:
predict_dataset = load_dataset("json", data_files=data_args.test_dataset_dir, split="train",
cache_dir=model_args.cache_dir)
# Labels
label_list = list(range(1))
num_labels = len(label_list)
# Config
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# Set additional parameters to control the flow of the experiments
config.reduction_features = model_args.reduction_features
config.multiple_dense_layers = model_args.multiple_dense_layers
# Tokenizer
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
do_lower_case=model_args.do_lower_case,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# Check all possible combinations for choosing model and setting
if not model_args.hierarchical:
if model_args.max_pooled:
if model_args.concatenate_financial_features:
"""
Scenario 1: BERT (max-pooled) using financial embeddings.
"""
model = FinancialBertMaxPooledForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
"""
Scenario 2: BERT (max-pooled).
"""
model = BertMaxPooledForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
if model_args.concatenate_financial_features:
"""
Scenario 3: BERT (cls-pooled) using financial embeddings.
"""
model = BertFinancialModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
"""
Scenario 4: BERT (cls-pooled).
"""
model = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
if model_args.hierarchical:
if model_args.concatenate_financial_features:
"""
Scenario 5: Hierarchical-BERT using financial embeddings.
"""
model = HierarchicalBertFinancialModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
segment_encoder = model.bert
model_encoder = FinancialHierarchicalBert(config=config,
encoder=segment_encoder,
max_segments=data_args.max_segments,
max_segment_length=data_args.max_seg_length,
max_pooled=model_args.max_pooled)
model.bert = model_encoder
else:
"""
Scenario 6: Hierarchical-BERT.
"""
model = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
segment_encoder = model.bert
model_encoder = HierarchicalBert(encoder=segment_encoder,
max_segments=data_args.max_segments,
max_segment_length=data_args.max_seg_length,
max_pooled=model_args.max_pooled)
model.bert = model_encoder
# Padding strategy
if data_args.pad_to_max_length:
padding = "max_length"
else:
padding = False
def preprocess_function(examples):
"""
Preprocesses the examples of a specific batch.
"""
if model_args.hierarchical:
case_template = [[0] * data_args.max_seg_length]
batch = {'input_ids': [], 'attention_mask': [], 'token_type_ids': []}
for doc in examples['text']:
doc = segment_string(doc, data_args.max_seg_length)
doc_encodings = tokenizer(doc[:data_args.max_segments], padding=padding,
max_length=data_args.max_seg_length, truncation=True)
batch['input_ids'].append(doc_encodings['input_ids'] + case_template * (
data_args.max_segments - len(doc_encodings['input_ids'])))
batch['attention_mask'].append(doc_encodings['attention_mask'] + case_template * (
data_args.max_segments - len(doc_encodings['attention_mask'])))
batch['token_type_ids'].append(doc_encodings['token_type_ids'] + case_template * (
data_args.max_segments - len(doc_encodings['token_type_ids'])))
else:
# Tokenize the texts
batch = tokenizer(
examples["text"],
padding=padding,
max_length=data_args.max_seq_length,
truncation=True,
)
# batch["labels"] = [[1 if labels == label else 0 for label in label_list] for labels in examples["class"]]
batch["labels"] = [[0 if labels == label else 1 for label in label_list] for labels in examples["class"]]
if model_args.concatenate_financial_features:
batch['financial_features'] = examples['financial']
return batch
# If training, apply the preprocessing and log a few random samples
if training_args.do_train:
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
with training_args.main_process_first(desc="train dataset map pre-processing"):
train_dataset = train_dataset.map(
preprocess_function,
batched=True,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on train dataset",
)
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# If evaluating, apply the preprocessing and log a few random samples
if training_args.do_eval:
if data_args.max_eval_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
with training_args.main_process_first(desc="validation dataset map pre-processing"):
eval_dataset = eval_dataset.map(
preprocess_function,
batched=True,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on validation dataset",
)
# If predicting, apply the preprocessing and log a few random samples
if training_args.do_predict:
if data_args.max_predict_samples is not None:
predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
with training_args.main_process_first(desc="prediction dataset map pre-processing"):
predict_dataset = predict_dataset.map(
preprocess_function,
batched=True,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on prediction dataset",
)
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
data_collator = default_data_collator
elif training_args.fp16:
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
else:
data_collator = None
# Trainer
if model_args.concatenate_financial_features: | trainer = FinancialTrainer( | 4 | 2023-11-22 18:46:55+00:00 | 12k |
barseghyanartur/fake.py | examples/customization/override_default_data.py | [
{
"identifier": "Factory",
"path": "fake.py",
"snippet": "class Factory:\n \"\"\"Factory.\"\"\"\n\n def __init__(self, faker: Optional[Faker] = None) -> None:\n # Directly use the setter to ensure provider methods are added\n self.faker = faker or FAKER\n\n @property\n def fake... | from fake import Factory, Faker
from data import FIRST_NAMES, LAST_NAMES, WORDS | 7,989 |
__author__ = "Artur Barseghyan <artur.barseghyan@gmail.com>"
__copyright__ = "2023 Artur Barseghyan"
__license__ = "MIT"
__all__ = (
"FACTORY",
"FAKER",
)
class FakerOverrideDefaultData(Faker):
"""Faker class for custom names and words."""
def load_names(self) -> None:
"""Override default first- and last-names dictionaries."""
self._first_names = FIRST_NAMES
self._last_names = LAST_NAMES
def load_words(self) -> None:
"""Override default words dictionary."""
self._words = WORDS
FAKER = FakerOverrideDefaultData(alias="override_default_data")
|
__author__ = "Artur Barseghyan <artur.barseghyan@gmail.com>"
__copyright__ = "2023 Artur Barseghyan"
__license__ = "MIT"
__all__ = (
"FACTORY",
"FAKER",
)
class FakerOverrideDefaultData(Faker):
"""Faker class for custom names and words."""
def load_names(self) -> None:
"""Override default first- and last-names dictionaries."""
self._first_names = FIRST_NAMES
self._last_names = LAST_NAMES
def load_words(self) -> None:
"""Override default words dictionary."""
self._words = WORDS
FAKER = FakerOverrideDefaultData(alias="override_default_data") | FACTORY = Factory(FAKER) | 0 | 2023-11-24 21:36:14+00:00 | 12k |
Yifei-Y/Openset-RCNN | openset_rcnn/modeling/roi_heads/osrcnn_roi_heads.py | [
{
"identifier": "OpensetFastRCNNOutputLayers",
"path": "openset_rcnn/modeling/roi_heads/osrcnn_fast_rcnn.py",
"snippet": "class OpensetFastRCNNOutputLayers(nn.Module):\n \"\"\"\n Two linear layers for predicting Fast R-CNN outputs:\n\n 1. proposal-to-detection box regression deltas\n 2. iou\... | import inspect
import logging
import numpy as np
import torch
from typing import Dict, List, Optional, Tuple
from torch import nn
from detectron2.config import configurable
from detectron2.layers import ShapeSpec
from detectron2.structures import ImageList, Instances, pairwise_iou
from detectron2.utils.events import get_event_storage
from detectron2.modeling.roi_heads.roi_heads import ROIHeads, ROI_HEADS_REGISTRY
from detectron2.modeling.poolers import ROIPooler
from detectron2.modeling.proposal_generator.proposal_utils import add_ground_truth_to_proposals
from detectron2.modeling.roi_heads.box_head import build_box_head
from .osrcnn_fast_rcnn import OpensetFastRCNNOutputLayers
from .softmax_classifier import SoftMaxClassifier
from .prototype_learning_network import PLN | 8,876 | # Copyright (c) Facebook, Inc. and its affiliates.
logger = logging.getLogger(__name__)
@ROI_HEADS_REGISTRY.register()
class OpensetROIHeads(ROIHeads):
"""
Openset RoI Head.
1. RoI Align and FC
2. * bbox regression
* iou prediction
* prototype learning
"""
@configurable
def __init__(
self,
*,
box_in_features: List[str],
box_pooler: ROIPooler,
box_head: nn.Module,
box_predictor: nn.Module,
dml: nn.Module,
softmaxcls: nn.Module,
train_on_pred_boxes: bool = False,
**kwargs,
):
"""
NOTE: this interface is experimental.
Args:
box_in_features (list[str]): list of feature names to use for the box head.
box_pooler (ROIPooler): pooler to extra region features for box head
box_head (nn.Module): transform features to make box predictions
box_predictor (nn.Module): make box predictions from the feature.
Should have the same interface as :class:`FastRCNNOutputLayers`.
dml: metric learning class.
softmaxcls: softmax classifier class.
train_on_pred_boxes (bool): whether to use proposal boxes or
predicted boxes from the box head to train other heads.
"""
super().__init__(**kwargs)
# keep self.in_features for backward compatibility
self.in_features = self.box_in_features = box_in_features
self.box_pooler = box_pooler
self.box_head = box_head
self.box_predictor = box_predictor
self.train_on_pred_boxes = train_on_pred_boxes
self.dml = dml
self.softmaxcls = softmaxcls
@classmethod
def from_config(cls, cfg, input_shape):
ret = super().from_config(cfg)
ret["train_on_pred_boxes"] = cfg.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES
# Subclasses that have not been updated to use from_config style construction
# may have overridden _init_*_head methods. In this case, those overridden methods
# will not be classmethods and we need to avoid trying to call them here.
# We test for this with ismethod which only returns True for bound methods of cls.
# Such subclasses will need to handle calling their overridden _init_*_head methods.
if inspect.ismethod(cls._init_box_head):
ret.update(cls._init_box_head(cfg, input_shape))
return ret
@classmethod
def _init_box_head(cls, cfg, input_shape):
# fmt: off
in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features)
sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
# fmt: on
# If StandardROIHeads is applied on multiple feature maps (as in FPN),
# then we share the same predictors and therefore the channel counts must be the same
in_channels = [input_shape[f].channels for f in in_features]
# Check all channel counts are equal
assert len(set(in_channels)) == 1, in_channels
in_channels = in_channels[0]
box_pooler = ROIPooler(
output_size=pooler_resolution,
scales=pooler_scales,
sampling_ratio=sampling_ratio,
pooler_type=pooler_type,
)
# Here we split "box head" and "box predictor", which is mainly due to historical reasons.
# They are used together so the "box predictor" layers should be part of the "box head".
# New subclasses of ROIHeads do not need "box predictor"s.
box_head = build_box_head(
cfg, ShapeSpec(channels=in_channels, height=pooler_resolution, width=pooler_resolution)
)
box_predictor = OpensetFastRCNNOutputLayers(cfg, box_head.output_shape)
| # Copyright (c) Facebook, Inc. and its affiliates.
logger = logging.getLogger(__name__)
@ROI_HEADS_REGISTRY.register()
class OpensetROIHeads(ROIHeads):
"""
Openset RoI Head.
1. RoI Align and FC
2. * bbox regression
* iou prediction
* prototype learning
"""
@configurable
def __init__(
self,
*,
box_in_features: List[str],
box_pooler: ROIPooler,
box_head: nn.Module,
box_predictor: nn.Module,
dml: nn.Module,
softmaxcls: nn.Module,
train_on_pred_boxes: bool = False,
**kwargs,
):
"""
NOTE: this interface is experimental.
Args:
box_in_features (list[str]): list of feature names to use for the box head.
box_pooler (ROIPooler): pooler to extra region features for box head
box_head (nn.Module): transform features to make box predictions
box_predictor (nn.Module): make box predictions from the feature.
Should have the same interface as :class:`FastRCNNOutputLayers`.
dml: metric learning class.
softmaxcls: softmax classifier class.
train_on_pred_boxes (bool): whether to use proposal boxes or
predicted boxes from the box head to train other heads.
"""
super().__init__(**kwargs)
# keep self.in_features for backward compatibility
self.in_features = self.box_in_features = box_in_features
self.box_pooler = box_pooler
self.box_head = box_head
self.box_predictor = box_predictor
self.train_on_pred_boxes = train_on_pred_boxes
self.dml = dml
self.softmaxcls = softmaxcls
@classmethod
def from_config(cls, cfg, input_shape):
ret = super().from_config(cfg)
ret["train_on_pred_boxes"] = cfg.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES
# Subclasses that have not been updated to use from_config style construction
# may have overridden _init_*_head methods. In this case, those overridden methods
# will not be classmethods and we need to avoid trying to call them here.
# We test for this with ismethod which only returns True for bound methods of cls.
# Such subclasses will need to handle calling their overridden _init_*_head methods.
if inspect.ismethod(cls._init_box_head):
ret.update(cls._init_box_head(cfg, input_shape))
return ret
@classmethod
def _init_box_head(cls, cfg, input_shape):
# fmt: off
in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features)
sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
# fmt: on
# If StandardROIHeads is applied on multiple feature maps (as in FPN),
# then we share the same predictors and therefore the channel counts must be the same
in_channels = [input_shape[f].channels for f in in_features]
# Check all channel counts are equal
assert len(set(in_channels)) == 1, in_channels
in_channels = in_channels[0]
box_pooler = ROIPooler(
output_size=pooler_resolution,
scales=pooler_scales,
sampling_ratio=sampling_ratio,
pooler_type=pooler_type,
)
# Here we split "box head" and "box predictor", which is mainly due to historical reasons.
# They are used together so the "box predictor" layers should be part of the "box head".
# New subclasses of ROIHeads do not need "box predictor"s.
box_head = build_box_head(
cfg, ShapeSpec(channels=in_channels, height=pooler_resolution, width=pooler_resolution)
)
box_predictor = OpensetFastRCNNOutputLayers(cfg, box_head.output_shape)
| dml = PLN(cfg) | 2 | 2023-11-21 01:47:01+00:00 | 12k |
MICLab-Unicamp/medpseg | medpseg/edet/modeling_efficientdet.py | [
{
"identifier": "SelfAttention",
"path": "medpseg/self_attention.py",
"snippet": "class SelfAttention(nn.Module):\n '''\n Spatial attention module, with 1x1 convolutions, idea from\n ASSESSING KNEE OA SEVERITY WITH CNN ATTENTION-BASED END-TO-END ARCHITECTURES\n '''\n def __init__(self, in... | from typing import List, Optional, Tuple, Union
from collections import OrderedDict
from torch import nn
from medpseg.self_attention import SelfAttention
from medpseg.edet.efficientnet.utils import MemoryEfficientSwish
from medpseg.edet.efficientdet.model import BiFPN, EfficientNet, SegmentationClasssificationHead, CirculatoryBranch
import torch | 8,881 | elif key == "seg_exponential_stride_compression":
self.feature_adapters = nn.ModuleList([nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=11, padding=5, stride=128, dilation=6, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False)),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=9, padding=4, stride=64, dilation=5, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False)),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=7, padding=3, stride=32, dilation=4, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False)),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=5, padding=2, stride=16, dilation=3, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False)),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=3, padding=1, stride=8, dilation=2, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False))])
self.upsampler = nn.UpsamplingBilinear2d(scale_factor=2)
self.pooling = nn.AdaptiveAvgPool2d(1)
elif key == "nonlinear_esc": # Save this for future embbedding building for transformers
# Reduced stride progression, trusting average pooling, makes network work with 128x128 inputs minimum
self.feature_adapters = nn.ModuleList([nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=11, padding=5, stride=64, dilation=4, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False),
nn.LeakyReLU()),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=9, padding=4, stride=32, dilation=3, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False),
nn.LeakyReLU()),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=7, padding=3, stride=16, dilation=3, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False),
nn.LeakyReLU()),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=5, padding=2, stride=8, dilation=2, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False),
nn.LeakyReLU()),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=3, padding=1, stride=4, dilation=2, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False),
nn.LeakyReLU())])
self.upsampler = nn.UpsamplingBilinear2d(scale_factor=2)
self.pooling = nn.AdaptiveAvgPool2d(1)
else:
raise ValueError(f"Unsupported feature adapter {key}. Use one of {FeatureFusion.SUPPORTED_STRATS}")
self.latent_space = None
def get_latent_space(self):
# Save this for future transformer involvement
B, C, _, _ = self.latent_space.shape
return self.latent_space.reshape(B, C)
def forward(self, in_features: List[torch.Tensor]) -> Optional[torch.Tensor]:
out_features = None
for feature_adapter, in_feature in zip(self.feature_adapters, in_features):
if out_features is None: # first thing
out_features = feature_adapter(in_feature)
elif self.key == "upsample_cat":
out_features = torch.cat([out_features, feature_adapter(in_feature)], dim=1) # upsample cat concatenates in channel dimension
else:
out_features += feature_adapter(in_feature)
if self.key in ["nonlinear_esc", "seg_exponential_stride_compression"]:
self.latent_space = self.pooling(out_features)
return self.upsampler(in_features[0]) * self.latent_space # latent space weights channel contributions
else:
return out_features
class EfficientDetForSemanticSegmentation(nn.Module):
def __init__(self,
load_weights:bool = True,
num_classes: int = 2,
compound_coef: int = 4,
repeat: int = 3,
expand_bifpn: Union[bool, str] = False,
backbone: str = "effnet",
circulatory_branch: bool = False,
bifpn_channels: int = 128,
squeeze:bool = False,
deep_supervision: bool = False,
self_attention: bool = False,
soft_circulatory: bool = False,
**kwargs): # dump for old variables
'''
load_weights: wether to load pre trained as backbone
num_classes: number of classes for primary downstream segmentation task
compound_coef: which efficientnet variation to base the architecture of, only supports 4.
repeat: how many conv blocks on the segmentation head
expand_bifpn: how to expand the bifpn features. Upsample is best
backbone: efficientnet or convnext as backbone
num_classes_aux: number of classes for secondary segmentation task. If None will not initialize second output.
'''
super().__init__()
for k, v in kwargs.items():
print(f"WARNING: MEDSeg Argument {k}={v} being ignored")
self.compound_coef = compound_coef
self.backbone_compound_coef = [0, 1, 2, 3, 4, 5, 6, 7]
self.input_sizes = [512, 640, 768, 896, 1024, 1280, 1280, 1536]
self.num_classes = num_classes
self.expand_bifpn = expand_bifpn
self.backbone = backbone
self.self_attention = self_attention
self.deep_supervision = deep_supervision
if self.self_attention:
self.attention_modules: nn.ModuleList = nn.ModuleList([SelfAttention(bifpn_channels, dim='2d') for _ in range(5)])
if self.expand_bifpn == "upsample_cat":
self.upsample_cat_scaling = 5
else:
self.upsample_cat_scaling = 1 # scale expected input of segmentation heads
# Check if expand_bifpn requires
feature_fusion = self.set_expand_conv()
conv_channel_coef = {
# the channels of P2/P3/P4.
0: [16, 24, 40],
4: [24, 32, 56],
6: [32, 40, 72],
7: [32, 48, 80],
-1: [96, 192, 384]
}
if self.backbone == "convnext":
print("Changing compound coeff of BiFPN due to convnext backbone")
compound_coef = -1
print(f"Convnext upsample scale {self.convnext_upsample_scale}")
| '''
Copyright (c) Diedre Carmo, Medical Imaging Computing Lab (MICLab https://miclab.fee.unicamp.br/.
https://github.com/MICLab-Unicamp/medpseg
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Original Author: Zylo117
Modified by Israel, adopted by Diedre as an initial EfficientDet implementation and extended to MEDPSeg related implementations
April 2023: removed code not directly related to MEDSeg and extra deprecations
'''
class FeatureFusion(nn.Module):
'''
Feature fusion module that makes use of all BiFPN features for segmentation instead of only
upsampling the highest spatial resolution.
upsample_sum: upsamples and sums all features
(ESC) exponential_stride_compression: increases kernel size and dilation and exponentially increases the stride to compress features, from B, C, x, y into a B, C, x/256, y/256 array that can be linearized easily with reshape. Minimum input size 256x256.
seg_exponential_stride_compression: use values derived from ESC to weight high resolution features
'''
SUPPORTED_STRATS = ["cat", "upsample_sum", "upsample_cat", "exponential_stride_compression", "seg_exponential_stride_compression", "nonlinear_esc"]
def __init__(self, in_c: int, out_c: int, key: Union[bool, str]):
super().__init__()
print(f"SELECTING FEATURE ADAPTER: {key}")
self.key = key
if key == "cat":
# Concatenate features without over upsampling (results in features /2 the spatial resolution of the input)
self.feature_adapters = nn.ModuleList([nn.Identity(),
nn.UpsamplingBilinear2d(scale_factor=2),
nn.UpsamplingBilinear2d(scale_factor=4),
nn.UpsamplingBilinear2d(scale_factor=8),
nn.UpsamplingBilinear2d(scale_factor=16)])
elif key == "upsample_sum" or key == "upsample_cat":
self.feature_adapters = nn.ModuleList([nn.UpsamplingBilinear2d(scale_factor=2),
nn.UpsamplingBilinear2d(scale_factor=4),
nn.UpsamplingBilinear2d(scale_factor=8),
nn.UpsamplingBilinear2d(scale_factor=16),
nn.UpsamplingBilinear2d(scale_factor=32)])
elif key == "exponential_stride_compression":
self.feature_adapters = nn.ModuleList([nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=11, padding=5, stride=128, dilation=6, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False)),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=9, padding=4, stride=64, dilation=5, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False)),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=7, padding=3, stride=32, dilation=4, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False)),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=5, padding=2, stride=16, dilation=3, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False)),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=3, padding=1, stride=8, dilation=2, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False))])
elif key == "seg_exponential_stride_compression":
self.feature_adapters = nn.ModuleList([nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=11, padding=5, stride=128, dilation=6, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False)),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=9, padding=4, stride=64, dilation=5, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False)),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=7, padding=3, stride=32, dilation=4, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False)),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=5, padding=2, stride=16, dilation=3, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False)),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=3, padding=1, stride=8, dilation=2, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False))])
self.upsampler = nn.UpsamplingBilinear2d(scale_factor=2)
self.pooling = nn.AdaptiveAvgPool2d(1)
elif key == "nonlinear_esc": # Save this for future embbedding building for transformers
# Reduced stride progression, trusting average pooling, makes network work with 128x128 inputs minimum
self.feature_adapters = nn.ModuleList([nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=11, padding=5, stride=64, dilation=4, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False),
nn.LeakyReLU()),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=9, padding=4, stride=32, dilation=3, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False),
nn.LeakyReLU()),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=7, padding=3, stride=16, dilation=3, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False),
nn.LeakyReLU()),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=5, padding=2, stride=8, dilation=2, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False),
nn.LeakyReLU()),
nn.Sequential(nn.Conv2d(in_channels=in_c, out_channels=in_c, kernel_size=3, padding=1, stride=4, dilation=2, bias=False, groups=in_c),
nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=1, padding=0, stride=1, bias=False),
nn.LeakyReLU())])
self.upsampler = nn.UpsamplingBilinear2d(scale_factor=2)
self.pooling = nn.AdaptiveAvgPool2d(1)
else:
raise ValueError(f"Unsupported feature adapter {key}. Use one of {FeatureFusion.SUPPORTED_STRATS}")
self.latent_space = None
def get_latent_space(self):
# Save this for future transformer involvement
B, C, _, _ = self.latent_space.shape
return self.latent_space.reshape(B, C)
    def forward(self, in_features: List[torch.Tensor]) -> Optional[torch.Tensor]:
        """Fuse multi-scale feature maps according to the configured strategy.

        Each input map goes through its own adapter; results are concatenated
        on the channel axis ("upsample_cat") or summed element-wise. For the
        exponential-stride-compression variants the pooled fusion result is
        cached as a channel-attention vector and used to reweight the first
        input map (upsampled x2) instead of being returned directly.
        NOTE(review): assumes len(in_features) == len(self.feature_adapters)
        and, on the summing path, that all adapter outputs share one shape —
        confirm against the constructor.
        """
        out_features = None
        for feature_adapter, in_feature in zip(self.feature_adapters, in_features):
            if out_features is None: # first thing
                out_features = feature_adapter(in_feature)
            elif self.key == "upsample_cat":
                out_features = torch.cat([out_features, feature_adapter(in_feature)], dim=1) # upsample cat concatenates in channel dimension
            else:
                # All remaining strategies accumulate by element-wise addition.
                out_features += feature_adapter(in_feature)
        if self.key in ["nonlinear_esc", "seg_exponential_stride_compression"]:
            # Cache the pooled vector so get_latent_space() can expose it later.
            self.latent_space = self.pooling(out_features)
            return self.upsampler(in_features[0]) * self.latent_space # latent space weights channel contributions
        else:
            return out_features
class EfficientDetForSemanticSegmentation(nn.Module):
def __init__(self,
load_weights:bool = True,
num_classes: int = 2,
compound_coef: int = 4,
repeat: int = 3,
expand_bifpn: Union[bool, str] = False,
backbone: str = "effnet",
circulatory_branch: bool = False,
bifpn_channels: int = 128,
squeeze:bool = False,
deep_supervision: bool = False,
self_attention: bool = False,
soft_circulatory: bool = False,
**kwargs): # dump for old variables
'''
load_weights: wether to load pre trained as backbone
num_classes: number of classes for primary downstream segmentation task
compound_coef: which efficientnet variation to base the architecture of, only supports 4.
repeat: how many conv blocks on the segmentation head
expand_bifpn: how to expand the bifpn features. Upsample is best
backbone: efficientnet or convnext as backbone
num_classes_aux: number of classes for secondary segmentation task. If None will not initialize second output.
'''
super().__init__()
for k, v in kwargs.items():
print(f"WARNING: MEDSeg Argument {k}={v} being ignored")
self.compound_coef = compound_coef
self.backbone_compound_coef = [0, 1, 2, 3, 4, 5, 6, 7]
self.input_sizes = [512, 640, 768, 896, 1024, 1280, 1280, 1536]
self.num_classes = num_classes
self.expand_bifpn = expand_bifpn
self.backbone = backbone
self.self_attention = self_attention
self.deep_supervision = deep_supervision
if self.self_attention:
self.attention_modules: nn.ModuleList = nn.ModuleList([SelfAttention(bifpn_channels, dim='2d') for _ in range(5)])
if self.expand_bifpn == "upsample_cat":
self.upsample_cat_scaling = 5
else:
self.upsample_cat_scaling = 1 # scale expected input of segmentation heads
# Check if expand_bifpn requires
feature_fusion = self.set_expand_conv()
conv_channel_coef = {
# the channels of P2/P3/P4.
0: [16, 24, 40],
4: [24, 32, 56],
6: [32, 40, 72],
7: [32, 48, 80],
-1: [96, 192, 384]
}
if self.backbone == "convnext":
print("Changing compound coeff of BiFPN due to convnext backbone")
compound_coef = -1
print(f"Convnext upsample scale {self.convnext_upsample_scale}")
| self.bifpn = nn.Sequential(*[BiFPN(bifpn_channels, | 2 | 2023-11-21 20:03:33+00:00 | 12k |
amikey/Fooocus | fooocus_extras/facexlib/detection/retinaface.py | [
{
"identifier": "get_reference_facial_points",
"path": "fooocus_extras/facexlib/detection/align_trans.py",
"snippet": "def get_reference_facial_points(output_size=None, inner_padding_factor=0.0, outer_padding=(0, 0), default_square=False):\n \"\"\"\n Function:\n ----------\n get referenc... | import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from PIL import Image
from torchvision.models._utils import IntermediateLayerGetter as IntermediateLayerGetter
from fooocus_extras.facexlib.detection.align_trans import get_reference_facial_points, warp_and_crop_face
from fooocus_extras.facexlib.detection.retinaface_net import FPN, SSH, MobileNetV1, make_bbox_head, make_class_head, make_landmark_head
from fooocus_extras.facexlib.detection.retinaface_utils import (PriorBox, batched_decode, batched_decode_landm, decode, decode_landm,
py_cpu_nms) | 7,929 | boxes = boxes * self.scale / self.resize
boxes = boxes.cpu().numpy()
scores = conf.squeeze(0).data.cpu().numpy()[:, 1]
landmarks = decode_landm(landmarks.squeeze(0), priors, self.cfg['variance'])
landmarks = landmarks * self.scale1 / self.resize
landmarks = landmarks.cpu().numpy()
# ignore low scores
inds = np.where(scores > conf_threshold)[0]
boxes, landmarks, scores = boxes[inds], landmarks[inds], scores[inds]
# sort
order = scores.argsort()[::-1]
boxes, landmarks, scores = boxes[order], landmarks[order], scores[order]
# do NMS
bounding_boxes = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
keep = py_cpu_nms(bounding_boxes, nms_threshold)
bounding_boxes, landmarks = bounding_boxes[keep, :], landmarks[keep]
# self.t['forward_pass'].toc()
# print(self.t['forward_pass'].average_time)
# import sys
# sys.stdout.flush()
return np.concatenate((bounding_boxes, landmarks), axis=1)
def __align_multi(self, image, boxes, landmarks, limit=None):
if len(boxes) < 1:
return [], []
if limit:
boxes = boxes[:limit]
landmarks = landmarks[:limit]
faces = []
for landmark in landmarks:
facial5points = [[landmark[2 * j], landmark[2 * j + 1]] for j in range(5)]
warped_face = warp_and_crop_face(np.array(image), facial5points, self.reference, crop_size=(112, 112))
faces.append(warped_face)
return np.concatenate((boxes, landmarks), axis=1), faces
def align_multi(self, img, conf_threshold=0.8, limit=None):
rlt = self.detect_faces(img, conf_threshold=conf_threshold)
boxes, landmarks = rlt[:, 0:5], rlt[:, 5:]
return self.__align_multi(img, boxes, landmarks, limit)
# batched detection
def batched_transform(self, frames, use_origin_size):
"""
Arguments:
frames: a list of PIL.Image, or torch.Tensor(shape=[n, h, w, c],
type=np.float32, BGR format).
use_origin_size: whether to use origin size.
"""
from_PIL = True if isinstance(frames[0], Image.Image) else False
# convert to opencv format
if from_PIL:
frames = [cv2.cvtColor(np.asarray(frame), cv2.COLOR_RGB2BGR) for frame in frames]
frames = np.asarray(frames, dtype=np.float32)
# testing scale
im_size_min = np.min(frames[0].shape[0:2])
im_size_max = np.max(frames[0].shape[0:2])
resize = float(self.target_size) / float(im_size_min)
# prevent bigger axis from being more than max_size
if np.round(resize * im_size_max) > self.max_size:
resize = float(self.max_size) / float(im_size_max)
resize = 1 if use_origin_size else resize
# resize
if resize != 1:
if not from_PIL:
frames = F.interpolate(frames, scale_factor=resize)
else:
frames = [
cv2.resize(frame, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)
for frame in frames
]
# convert to torch.tensor format
if not from_PIL:
frames = frames.transpose(1, 2).transpose(1, 3).contiguous()
else:
frames = frames.transpose((0, 3, 1, 2))
frames = torch.from_numpy(frames)
return frames, resize
def batched_detect_faces(self, frames, conf_threshold=0.8, nms_threshold=0.4, use_origin_size=True):
"""
Arguments:
frames: a list of PIL.Image, or np.array(shape=[n, h, w, c],
type=np.uint8, BGR format).
conf_threshold: confidence threshold.
nms_threshold: nms threshold.
use_origin_size: whether to use origin size.
Returns:
final_bounding_boxes: list of np.array ([n_boxes, 5],
type=np.float32).
final_landmarks: list of np.array ([n_boxes, 10], type=np.float32).
"""
# self.t['forward_pass'].tic()
frames, self.resize = self.batched_transform(frames, use_origin_size)
frames = frames.to(self.device)
frames = frames - self.mean_tensor
b_loc, b_conf, b_landmarks, priors = self.__detect_faces(frames)
final_bounding_boxes, final_landmarks = [], []
# decode
priors = priors.unsqueeze(0)
|
def generate_config(network_name):
    """Return the RetinaFace hyper-parameter dict for the requested backbone.

    Supported names are 'mobile0.25' (MobileNet-0.25) and 'resnet50';
    any other value raises NotImplementedError.
    """
    configs = {
        'mobile0.25': {
            'name': 'mobilenet0.25',
            'min_sizes': [[16, 32], [64, 128], [256, 512]],
            'steps': [8, 16, 32],
            'variance': [0.1, 0.2],
            'clip': False,
            'loc_weight': 2.0,
            'gpu_train': True,
            'batch_size': 32,
            'ngpu': 1,
            'epoch': 250,
            'decay1': 190,
            'decay2': 220,
            'image_size': 640,
            'return_layers': {
                'stage1': 1,
                'stage2': 2,
                'stage3': 3
            },
            'in_channel': 32,
            'out_channel': 64
        },
        'resnet50': {
            'name': 'Resnet50',
            'min_sizes': [[16, 32], [64, 128], [256, 512]],
            'steps': [8, 16, 32],
            'variance': [0.1, 0.2],
            'clip': False,
            'loc_weight': 2.0,
            'gpu_train': True,
            'batch_size': 24,
            'ngpu': 4,
            'epoch': 100,
            'decay1': 70,
            'decay2': 90,
            'image_size': 840,
            'return_layers': {
                'layer2': 1,
                'layer3': 2,
                'layer4': 3
            },
            'in_channel': 256,
            'out_channel': 256
        },
    }
    selected = configs.get(network_name)
    if selected is None:
        raise NotImplementedError(f'network_name={network_name}')
    return selected
class RetinaFace(nn.Module):
    def __init__(self, network_name='resnet50', half=False, phase='test', device=None):
        """Build a RetinaFace detector.

        Args:
            network_name: backbone key, 'resnet50' or 'mobile0.25'.
            half: run inference in fp16 when True.
            phase: 'train' makes forward() return raw classification logits;
                anything else applies softmax to them.
            device: explicit torch device; auto-selects CUDA when available.
        """
        # Resolve the device up front so later tensor constructions land on it.
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') if device is None else device
        super(RetinaFace, self).__init__()
        self.half_inference = half
        cfg = generate_config(network_name)
        self.backbone = cfg['name']
        self.model_name = f'retinaface_{network_name}'
        self.cfg = cfg
        self.phase = phase
        # Short-side target and long-side cap used by transform()/batched_transform().
        self.target_size, self.max_size = 1600, 2150
        # Per-image rescale factor plus box/landmark de-normalisation scales;
        # filled in by transform() and __detect_faces().
        self.resize, self.scale, self.scale1 = 1., None, None
        # Per-channel means (104, 117, 123) subtracted from BGR inputs in detect_faces().
        self.mean_tensor = torch.tensor([[[[104.]], [[117.]], [[123.]]]], device=self.device)
        # Canonical 5-point landmark template used by __align_multi().
        self.reference = get_reference_facial_points(default_square=True)
        # Build network.
        backbone = None
        if cfg['name'] == 'mobilenet0.25':
            backbone = MobileNetV1()
            self.body = IntermediateLayerGetter(backbone, cfg['return_layers'])
        elif cfg['name'] == 'Resnet50':
            backbone = models.resnet50(weights=None)
            self.body = IntermediateLayerGetter(backbone, cfg['return_layers'])
        # FPN input widths are multiples of cfg['in_channel'] for the three
        # backbone stages selected via cfg['return_layers'].
        in_channels_stage2 = cfg['in_channel']
        in_channels_list = [
            in_channels_stage2 * 2,
            in_channels_stage2 * 4,
            in_channels_stage2 * 8,
        ]
        out_channels = cfg['out_channel']
        self.fpn = FPN(in_channels_list, out_channels)
        # One SSH context module per pyramid level.
        self.ssh1 = SSH(out_channels, out_channels)
        self.ssh2 = SSH(out_channels, out_channels)
        self.ssh3 = SSH(out_channels, out_channels)
        # Per-level prediction heads: class scores, boxes, 5-point landmarks.
        self.ClassHead = make_class_head(fpn_num=3, inchannels=cfg['out_channel'])
        self.BboxHead = make_bbox_head(fpn_num=3, inchannels=cfg['out_channel'])
        self.LandmarkHead = make_landmark_head(fpn_num=3, inchannels=cfg['out_channel'])
        self.to(self.device)
        self.eval()
        if self.half_inference:
            self.half()
    def forward(self, inputs):
        """Run the detector on a preprocessed batch.

        Returns (bbox_regressions, classifications, ldm_regressions), each
        concatenated over the three pyramid levels along dim 1; outside the
        'train' phase the classification scores are softmax-normalised.
        """
        out = self.body(inputs)
        # IntermediateLayerGetter yields a dict of stage outputs; FPN wants a list.
        if self.backbone == 'mobilenet0.25' or self.backbone == 'Resnet50':
            out = list(out.values())
        # FPN
        fpn = self.fpn(out)
        # SSH
        feature1 = self.ssh1(fpn[0])
        feature2 = self.ssh2(fpn[1])
        feature3 = self.ssh3(fpn[2])
        features = [feature1, feature2, feature3]
        bbox_regressions = torch.cat([self.BboxHead[i](feature) for i, feature in enumerate(features)], dim=1)
        classifications = torch.cat([self.ClassHead[i](feature) for i, feature in enumerate(features)], dim=1)
        tmp = [self.LandmarkHead[i](feature) for i, feature in enumerate(features)]
        ldm_regressions = (torch.cat(tmp, dim=1))
        if self.phase == 'train':
            output = (bbox_regressions, classifications, ldm_regressions)
        else:
            # Inference consumers read probabilities, not logits.
            output = (bbox_regressions, F.softmax(classifications, dim=-1), ldm_regressions)
        return output
    def __detect_faces(self, inputs):
        """Forward pass plus prior-box generation for one input batch.

        Side effect: stores self.scale (w,h,w,h) and self.scale1 (five (w,h)
        pairs), later used to map normalised boxes and landmarks to pixels.
        """
        # get scale
        height, width = inputs.shape[2:]
        self.scale = torch.tensor([width, height, width, height], dtype=torch.float32, device=self.device)
        tmp = [width, height, width, height, width, height, width, height, width, height]
        self.scale1 = torch.tensor(tmp, dtype=torch.float32, device=self.device)
        # forward
        inputs = inputs.to(self.device)
        if self.half_inference:
            inputs = inputs.half()
        loc, conf, landmarks = self(inputs)
        # get priorbox (anchors for this input resolution)
        priorbox = PriorBox(self.cfg, image_size=inputs.shape[2:])
        priors = priorbox.forward().to(self.device)
        return loc, conf, landmarks, priors
# single image detection
def transform(self, image, use_origin_size):
# convert to opencv format
if isinstance(image, Image.Image):
image = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)
image = image.astype(np.float32)
# testing scale
im_size_min = np.min(image.shape[0:2])
im_size_max = np.max(image.shape[0:2])
resize = float(self.target_size) / float(im_size_min)
# prevent bigger axis from being more than max_size
if np.round(resize * im_size_max) > self.max_size:
resize = float(self.max_size) / float(im_size_max)
resize = 1 if use_origin_size else resize
# resize
if resize != 1:
image = cv2.resize(image, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)
# convert to torch.tensor format
# image -= (104, 117, 123)
image = image.transpose(2, 0, 1)
image = torch.from_numpy(image).unsqueeze(0)
return image, resize
    def detect_faces(
        self,
        image,
        conf_threshold=0.8,
        nms_threshold=0.4,
        use_origin_size=True,
    ):
        """Detect faces in a single image.

        Args:
            image: PIL.Image or BGR ndarray.
            conf_threshold: minimum face score kept before NMS.
            nms_threshold: IoU threshold for non-maximum suppression.
            use_origin_size: skip the short-side rescale when True.
        Returns:
            np.ndarray of shape (n_faces, 15): [x1, y1, x2, y2, score,
            lm_x1, lm_y1, ..., lm_x5, lm_y5] in original-image pixels.
        """
        image, self.resize = self.transform(image, use_origin_size)
        image = image.to(self.device)
        if self.half_inference:
            image = image.half()
        # Subtract the per-channel means stored in self.mean_tensor.
        image = image - self.mean_tensor
        loc, conf, landmarks, priors = self.__detect_faces(image)
        # Decode anchor offsets and rescale into original-image coordinates.
        boxes = decode(loc.data.squeeze(0), priors.data, self.cfg['variance'])
        boxes = boxes * self.scale / self.resize
        boxes = boxes.cpu().numpy()
        # The second class column is used as the face score.
        scores = conf.squeeze(0).data.cpu().numpy()[:, 1]
        landmarks = decode_landm(landmarks.squeeze(0), priors, self.cfg['variance'])
        landmarks = landmarks * self.scale1 / self.resize
        landmarks = landmarks.cpu().numpy()
        # ignore low scores
        inds = np.where(scores > conf_threshold)[0]
        boxes, landmarks, scores = boxes[inds], landmarks[inds], scores[inds]
        # sort by score, highest first
        order = scores.argsort()[::-1]
        boxes, landmarks, scores = boxes[order], landmarks[order], scores[order]
        # do NMS
        bounding_boxes = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
        keep = py_cpu_nms(bounding_boxes, nms_threshold)
        bounding_boxes, landmarks = bounding_boxes[keep, :], landmarks[keep]
        # self.t['forward_pass'].toc()
        # print(self.t['forward_pass'].average_time)
        # import sys
        # sys.stdout.flush()
        return np.concatenate((bounding_boxes, landmarks), axis=1)
    def __align_multi(self, image, boxes, landmarks, limit=None):
        """Warp-align each detected face to a 112x112 crop.

        Returns (detections, faces): detections stacks boxes and landmarks
        horizontally; faces is a list of aligned crops. Empty inputs yield
        ([], []). *limit* keeps only the first N detections.
        """
        if len(boxes) < 1:
            return [], []
        if limit:
            boxes = boxes[:limit]
            landmarks = landmarks[:limit]
        faces = []
        for landmark in landmarks:
            # Regroup the flat [x1, y1, ..., x5, y5] vector into 5 (x, y) points.
            facial5points = [[landmark[2 * j], landmark[2 * j + 1]] for j in range(5)]
            warped_face = warp_and_crop_face(np.array(image), facial5points, self.reference, crop_size=(112, 112))
            faces.append(warped_face)
        return np.concatenate((boxes, landmarks), axis=1), faces
def align_multi(self, img, conf_threshold=0.8, limit=None):
rlt = self.detect_faces(img, conf_threshold=conf_threshold)
boxes, landmarks = rlt[:, 0:5], rlt[:, 5:]
return self.__align_multi(img, boxes, landmarks, limit)
# batched detection
    def batched_transform(self, frames, use_origin_size):
        """Convert a batch of frames to a float tensor, optionally rescaled.

        Arguments:
            frames: a list of PIL.Image, or torch.Tensor(shape=[n, h, w, c],
                type=np.float32, BGR format).
            use_origin_size: whether to use origin size (skips resizing).
        Returns:
            (frames, resize): an [n, c, h, w] tensor and the scale factor used.
        """
        from_PIL = True if isinstance(frames[0], Image.Image) else False
        # convert to opencv format
        if from_PIL:
            frames = [cv2.cvtColor(np.asarray(frame), cv2.COLOR_RGB2BGR) for frame in frames]
            frames = np.asarray(frames, dtype=np.float32)
        # testing scale: bring the short side of frame 0 to target_size ...
        im_size_min = np.min(frames[0].shape[0:2])
        im_size_max = np.max(frames[0].shape[0:2])
        resize = float(self.target_size) / float(im_size_min)
        # ... but prevent bigger axis from being more than max_size
        if np.round(resize * im_size_max) > self.max_size:
            resize = float(self.max_size) / float(im_size_max)
        resize = 1 if use_origin_size else resize
        # resize
        if resize != 1:
            if not from_PIL:
                # NOTE(review): frames is still [n, h, w, c] here while
                # F.interpolate scales the trailing spatial dims — confirm
                # tensor inputs are resized on the intended axes.
                frames = F.interpolate(frames, scale_factor=resize)
            else:
                frames = [
                    cv2.resize(frame, None, None, fx=resize, fy=resize, interpolation=cv2.INTER_LINEAR)
                    for frame in frames
                ]
        # convert to torch.tensor format, (n, h, w, c) -> (n, c, h, w)
        if not from_PIL:
            frames = frames.transpose(1, 2).transpose(1, 3).contiguous()
        else:
            frames = frames.transpose((0, 3, 1, 2))
            frames = torch.from_numpy(frames)
        return frames, resize
def batched_detect_faces(self, frames, conf_threshold=0.8, nms_threshold=0.4, use_origin_size=True):
"""
Arguments:
frames: a list of PIL.Image, or np.array(shape=[n, h, w, c],
type=np.uint8, BGR format).
conf_threshold: confidence threshold.
nms_threshold: nms threshold.
use_origin_size: whether to use origin size.
Returns:
final_bounding_boxes: list of np.array ([n_boxes, 5],
type=np.float32).
final_landmarks: list of np.array ([n_boxes, 10], type=np.float32).
"""
# self.t['forward_pass'].tic()
frames, self.resize = self.batched_transform(frames, use_origin_size)
frames = frames.to(self.device)
frames = frames - self.mean_tensor
b_loc, b_conf, b_landmarks, priors = self.__detect_faces(frames)
final_bounding_boxes, final_landmarks = [], []
# decode
priors = priors.unsqueeze(0) | b_loc = batched_decode(b_loc, priors, self.cfg['variance']) * self.scale / self.resize | 9 | 2023-11-25 00:42:32+00:00 | 12k |
DLYuanGod/TinyGPT-V | eval_vqa.py | [
{
"identifier": "OKVQAEvalData",
"path": "minigpt4/datasets/datasets/vqa_datasets.py",
"snippet": "class OKVQAEvalData(torch.utils.data.Dataset):\n def __init__(self, loaded_data, vis_processor, root_path):\n self.loaded_data = loaded_data\n self.root_path = root_path\n self.vis_... | import os
import re
import json
import argparse
import numpy as np
import torch
from collections import defaultdict
from PIL import Image
from tqdm import tqdm
from torch.utils.data import DataLoader
from datasets import load_dataset
from minigpt4.datasets.datasets.vqa_datasets import OKVQAEvalData,VizWizEvalData,IconQAEvalData,GQAEvalData,VSREvalData,HMEvalData
from minigpt4.common.vqa_tools.VQA.PythonHelperTools.vqaTools.vqa import VQA
from minigpt4.common.vqa_tools.VQA.PythonEvaluationTools.vqaEvaluation.vqaEval import VQAEval
from minigpt4.common.eval_utils import prepare_texts, init_model, eval_parser
from minigpt4.conversation.conversation import CONV_VISION_minigptv2
from minigpt4.common.config import Config | 8,987 |
def list_of_str(arg):
    """Split a comma-separated CLI argument into a list of strings."""
    # str.split already yields str elements, so no extra mapping is needed.
    return arg.split(',')
# Parse CLI args (shared eval parser plus a comma-separated dataset list)
# and load the run configuration.
parser = eval_parser()
parser.add_argument("--dataset", type=list_of_str, default='refcoco', help="dataset to evaluate")
args = parser.parse_args()
cfg = Config(args)
# Build the model / visual-processor pair and a conversation template with
# an empty system prompt, then switch the model to eval mode.
model, vis_processor = init_model(args)
conv_temp = CONV_VISION_minigptv2.copy()
conv_temp.system = ""
model.eval()
save_path = cfg.run_cfg.save_path
if 'okvqa' in args.dataset:
eval_file_path = cfg.evaluation_datasets_cfg["okvqa"]["eval_file_path"]
img_path = cfg.evaluation_datasets_cfg["okvqa"]["img_path"]
batch_size = cfg.evaluation_datasets_cfg["okvqa"]["batch_size"]
max_new_tokens = cfg.evaluation_datasets_cfg["okvqa"]["max_new_tokens"]
evaluation_annntation_path = os.path.join(eval_file_path, "okvqa_test_split.json")
with open(evaluation_annntation_path) as f:
ok_vqa_test_split = json.load(f)
|
def list_of_str(arg):
    """Split a comma-separated CLI argument into a list of strings."""
    # str.split already yields str elements, so no extra mapping is needed.
    return arg.split(',')
# Parse CLI args (shared eval parser plus a comma-separated dataset list)
# and load the run configuration.
parser = eval_parser()
parser.add_argument("--dataset", type=list_of_str, default='refcoco', help="dataset to evaluate")
args = parser.parse_args()
cfg = Config(args)
# Build the model / visual-processor pair and a conversation template with
# an empty system prompt, then switch the model to eval mode.
model, vis_processor = init_model(args)
conv_temp = CONV_VISION_minigptv2.copy()
conv_temp.system = ""
model.eval()
save_path = cfg.run_cfg.save_path
if 'okvqa' in args.dataset:
eval_file_path = cfg.evaluation_datasets_cfg["okvqa"]["eval_file_path"]
img_path = cfg.evaluation_datasets_cfg["okvqa"]["img_path"]
batch_size = cfg.evaluation_datasets_cfg["okvqa"]["batch_size"]
max_new_tokens = cfg.evaluation_datasets_cfg["okvqa"]["max_new_tokens"]
evaluation_annntation_path = os.path.join(eval_file_path, "okvqa_test_split.json")
with open(evaluation_annntation_path) as f:
ok_vqa_test_split = json.load(f)
| data = OKVQAEvalData(ok_vqa_test_split, vis_processor, img_path) | 0 | 2023-12-28 05:47:18+00:00 | 12k |
ali-vilab/dreamtalk | inference_for_demo_video.py | [
{
"identifier": "get_cfg_defaults",
"path": "configs/default.py",
"snippet": "def get_cfg_defaults():\n \"\"\"Get a yacs CfgNode object with default values for my_project.\"\"\"\n return _C.clone()"
},
{
"identifier": "DiffusionNet",
"path": "core/networks/diffusion_net.py",
"snipp... | import argparse
import json
import os
import shutil
import subprocess
import numpy as np
import torch
import torchaudio
from scipy.io import loadmat
from transformers import Wav2Vec2Processor
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2Model
from configs.default import get_cfg_defaults
from core.networks.diffusion_net import DiffusionNet
from core.networks.diffusion_util import NoisePredictor, VarianceSchedule
from core.utils import (
crop_src_image,
get_pose_params,
get_video_style_clip,
get_wav2vec_audio_window,
)
from generators.utils import get_netG, render_video | 7,797 | style_clip_path, "", style_max_len=256, start_idx=0
)
style_clip = style_clip_raw.unsqueeze(0).to(device)
style_pad_mask = (
style_pad_mask_raw.unsqueeze(0).to(device)
if style_pad_mask_raw is not None
else None
)
gen_exp_stack = diff_net.sample(
audio,
style_clip,
style_pad_mask,
output_dim=cfg.DATASET.FACE3D_DIM,
use_cf_guidance=cfg.CF_GUIDANCE.INFERENCE,
cfg_scale=cfg.CF_GUIDANCE.SCALE,
sample_method=sample_method,
ddim_num_step=ddim_num_step,
)
gen_exp = gen_exp_stack[0].cpu().numpy()
pose_ext = pose_path[-3:]
pose = None
pose = get_pose_params(pose_path)
# (L, 9)
selected_pose = None
if len(pose) >= len(gen_exp):
selected_pose = pose[: len(gen_exp)]
else:
selected_pose = pose[-1].unsqueeze(0).repeat(len(gen_exp), 1)
selected_pose[: len(pose)] = pose
gen_exp_pose = np.concatenate((gen_exp, selected_pose), axis=1)
np.save(output_path, gen_exp_pose)
return output_path
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="inference for demo")
parser.add_argument("--wav_path", type=str, default="", help="path for wav")
parser.add_argument("--image_path", type=str, default="", help="path for image")
parser.add_argument("--disable_img_crop", dest="img_crop", action="store_false")
parser.set_defaults(img_crop=True)
parser.add_argument(
"--style_clip_path", type=str, default="", help="path for style_clip_mat"
)
parser.add_argument("--pose_path", type=str, default="", help="path for pose")
parser.add_argument(
"--max_gen_len",
type=int,
default=1000,
help="The maximum length (seconds) limitation for generating videos",
)
parser.add_argument(
"--cfg_scale",
type=float,
default=1.0,
help="The scale of classifier-free guidance",
)
parser.add_argument(
"--output_name",
type=str,
default="test",
)
parser.add_argument(
"--device",
type=str,
default="cuda",
)
args = parser.parse_args()
if args.device == "cuda" and not torch.cuda.is_available():
print("CUDA is not available, set --device=cpu to use CPU.")
exit(1)
device = torch.device(args.device)
cfg = get_cfg_defaults()
cfg.CF_GUIDANCE.SCALE = args.cfg_scale
cfg.freeze()
tmp_dir = f"tmp/{args.output_name}"
os.makedirs(tmp_dir, exist_ok=True)
# get audio in 16000Hz
wav_16k_path = os.path.join(tmp_dir, f"{args.output_name}_16K.wav")
command = f"ffmpeg -y -i {args.wav_path} -async 1 -ac 1 -vn -acodec pcm_s16le -ar 16000 {wav_16k_path}"
subprocess.run(command.split())
# get wav2vec feat from audio
wav2vec_processor = Wav2Vec2Processor.from_pretrained(
"jonatasgrosman/wav2vec2-large-xlsr-53-english"
)
wav2vec_model = (
Wav2Vec2Model.from_pretrained("jonatasgrosman/wav2vec2-large-xlsr-53-english")
.eval()
.to(device)
)
speech_array, sampling_rate = torchaudio.load(wav_16k_path)
audio_data = speech_array.squeeze().numpy()
inputs = wav2vec_processor(
audio_data, sampling_rate=16_000, return_tensors="pt", padding=True
)
with torch.no_grad():
audio_embedding = wav2vec_model(
inputs.input_values.to(device), return_dict=False
)[0]
audio_feat_path = os.path.join(tmp_dir, f"{args.output_name}_wav2vec.npy")
np.save(audio_feat_path, audio_embedding[0].cpu().numpy())
# get src image
src_img_path = os.path.join(tmp_dir, "src_img.png")
if args.img_crop:
|
@torch.no_grad()
def get_diff_net(cfg, device):
    """Build the diffusion network and load weights from cfg.INFERENCE.CHECKPOINT.

    The checkpoint's "model_state_dict" holds entries for the whole saved
    model; only keys prefixed with "diff_net." belong to this sub-network,
    so the prefix is stripped before loading. Returns the net in eval mode.
    """
    sched_cfg = cfg.DIFFUSION.SCHEDULE
    schedule = VarianceSchedule(
        num_steps=sched_cfg.NUM_STEPS,
        beta_1=sched_cfg.BETA_1,
        beta_T=sched_cfg.BETA_T,
        mode=sched_cfg.MODE,
    )
    net = DiffusionNet(cfg=cfg, net=NoisePredictor(cfg), var_sched=schedule)
    ckpt = torch.load(cfg.INFERENCE.CHECKPOINT, map_location=device)
    prefix = "diff_net."
    weights = {
        name[len(prefix):]: tensor
        for name, tensor in ckpt["model_state_dict"].items()
        if name.startswith(prefix)
    }
    net.load_state_dict(weights, strict=True)
    net.eval()
    return net
@torch.no_grad()
def get_audio_feat(wav_path, output_name, wav2vec_model):
    """Unfinished stub for extracting a wav2vec feature array from a wav file.

    The original body referenced ``audio_feat_path`` — a name not defined in
    this function's scope — and then fell through to ``pass``, ignoring all
    three parameters, so calling it either raised ``NameError`` or silently
    did nothing. Fail loudly instead; the working wav2vec extraction lives
    inline in the ``__main__`` section of this script.

    Raises:
        NotImplementedError: always.
    """
    raise NotImplementedError(
        "get_audio_feat is an unfinished stub; use the inline wav2vec "
        "feature extraction in the __main__ section instead."
    )
@torch.no_grad()
def inference_one_video(
    cfg,
    audio_path,
    style_clip_path,
    pose_path,
    output_path,
    diff_net,
    device,
    max_audio_len=None,
    sample_method="ddim",
    ddim_num_step=10,
):
    """Generate 3DMM expression + pose coefficients for one audio clip.

    Loads precomputed wav2vec features, samples per-frame expression
    coefficients from the diffusion net conditioned on a style clip, appends
    9-dim pose parameters read from pose_path, and saves the resulting
    (L, exp_dim + 9) array to output_path (which is also returned).

    Args:
        cfg: frozen experiment config (WIN_SIZE, DATASET.*, CF_GUIDANCE.*).
        audio_path: .npy file of wav2vec features.
        style_clip_path: style reference clip used as conditioning.
        pose_path: file holding head-pose parameters, one row per frame.
        output_path: destination .npy path.
        diff_net: DiffusionNet as returned by get_diff_net().
        device: torch device used for sampling.
        max_audio_len: optional cap in seconds (50 feature rows per second).
        sample_method: diffusion sampler name, e.g. "ddim".
        ddim_num_step: number of DDIM steps when sample_method == "ddim".
    """
    audio_raw = audio_data = np.load(audio_path)
    # 50 wav2vec rows per second; truncate to the requested duration.
    if max_audio_len is not None:
        audio_raw = audio_raw[: max_audio_len * 50]
    # Two feature rows per generated video frame.
    gen_num_frames = len(audio_raw) // 2
    audio_win_array = get_wav2vec_audio_window(
        audio_raw,
        start_idx=0,
        num_frames=gen_num_frames,
        win_size=cfg.WIN_SIZE,
    )
    audio_win = torch.tensor(audio_win_array).to(device)
    audio = audio_win.unsqueeze(0)
    # the second parameter is "" because of bad interface design...
    style_clip_raw, style_pad_mask_raw = get_video_style_clip(
        style_clip_path, "", style_max_len=256, start_idx=0
    )
    style_clip = style_clip_raw.unsqueeze(0).to(device)
    style_pad_mask = (
        style_pad_mask_raw.unsqueeze(0).to(device)
        if style_pad_mask_raw is not None
        else None
    )
    gen_exp_stack = diff_net.sample(
        audio,
        style_clip,
        style_pad_mask,
        output_dim=cfg.DATASET.FACE3D_DIM,
        use_cf_guidance=cfg.CF_GUIDANCE.INFERENCE,
        cfg_scale=cfg.CF_GUIDANCE.SCALE,
        sample_method=sample_method,
        ddim_num_step=ddim_num_step,
    )
    gen_exp = gen_exp_stack[0].cpu().numpy()
    pose_ext = pose_path[-3:]  # NOTE(review): extension is computed but never used.
    pose = None
    pose = get_pose_params(pose_path)
    # (L, 9)
    selected_pose = None
    # Trim the pose track to the generated length, or pad it by repeating the
    # last pose when the audio is longer than the pose sequence.
    if len(pose) >= len(gen_exp):
        selected_pose = pose[: len(gen_exp)]
    else:
        selected_pose = pose[-1].unsqueeze(0).repeat(len(gen_exp), 1)
        selected_pose[: len(pose)] = pose
    gen_exp_pose = np.concatenate((gen_exp, selected_pose), axis=1)
    np.save(output_path, gen_exp_pose)
    return output_path
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="inference for demo")
parser.add_argument("--wav_path", type=str, default="", help="path for wav")
parser.add_argument("--image_path", type=str, default="", help="path for image")
parser.add_argument("--disable_img_crop", dest="img_crop", action="store_false")
parser.set_defaults(img_crop=True)
parser.add_argument(
"--style_clip_path", type=str, default="", help="path for style_clip_mat"
)
parser.add_argument("--pose_path", type=str, default="", help="path for pose")
parser.add_argument(
"--max_gen_len",
type=int,
default=1000,
help="The maximum length (seconds) limitation for generating videos",
)
parser.add_argument(
"--cfg_scale",
type=float,
default=1.0,
help="The scale of classifier-free guidance",
)
parser.add_argument(
"--output_name",
type=str,
default="test",
)
parser.add_argument(
"--device",
type=str,
default="cuda",
)
args = parser.parse_args()
if args.device == "cuda" and not torch.cuda.is_available():
print("CUDA is not available, set --device=cpu to use CPU.")
exit(1)
device = torch.device(args.device)
cfg = get_cfg_defaults()
cfg.CF_GUIDANCE.SCALE = args.cfg_scale
cfg.freeze()
tmp_dir = f"tmp/{args.output_name}"
os.makedirs(tmp_dir, exist_ok=True)
# get audio in 16000Hz
wav_16k_path = os.path.join(tmp_dir, f"{args.output_name}_16K.wav")
command = f"ffmpeg -y -i {args.wav_path} -async 1 -ac 1 -vn -acodec pcm_s16le -ar 16000 {wav_16k_path}"
subprocess.run(command.split())
# get wav2vec feat from audio
wav2vec_processor = Wav2Vec2Processor.from_pretrained(
"jonatasgrosman/wav2vec2-large-xlsr-53-english"
)
wav2vec_model = (
Wav2Vec2Model.from_pretrained("jonatasgrosman/wav2vec2-large-xlsr-53-english")
.eval()
.to(device)
)
speech_array, sampling_rate = torchaudio.load(wav_16k_path)
audio_data = speech_array.squeeze().numpy()
inputs = wav2vec_processor(
audio_data, sampling_rate=16_000, return_tensors="pt", padding=True
)
with torch.no_grad():
audio_embedding = wav2vec_model(
inputs.input_values.to(device), return_dict=False
)[0]
audio_feat_path = os.path.join(tmp_dir, f"{args.output_name}_wav2vec.npy")
np.save(audio_feat_path, audio_embedding[0].cpu().numpy())
# get src image
src_img_path = os.path.join(tmp_dir, "src_img.png")
if args.img_crop: | crop_src_image(args.image_path, src_img_path, 0.4) | 4 | 2023-12-28 05:39:31+00:00 | 12k |
jiawei-ren/dreamgaussian4d | diffusers/src/diffusers/schedulers/scheduling_k_dpm_2_ancestral_discrete.py | [
{
"identifier": "ConfigMixin",
"path": "diffusers/src/diffusers/configuration_utils.py",
"snippet": "class ConfigMixin:\n r\"\"\"\n Base class for all configuration classes. All configuration parameters are stored under `self.config`. Also\n provides the [`~ConfigMixin.from_config`] and [`~Conf... | import math
import numpy as np
import torch
from collections import defaultdict
from typing import List, Optional, Tuple, Union
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils.torch_utils import randn_tensor
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput | 8,939 | # Copyright 2023 Katherine Crowson, The HuggingFace Team and hlky. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
    (1-beta) over time from t = [0,1].

    Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
    to that part of the diffusion process.

    Args:
        num_diffusion_timesteps (`int`): the number of betas to produce.
        max_beta (`float`): the maximum beta to use; use values lower than 1 to
                     prevent singularities.
        alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar.
                     Choose from `cosine` or `exp`

    Returns:
        betas (`torch.Tensor`): the betas used by the scheduler to step the model outputs
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        # Fixed typo ("alpha_tranform_type") in the original error message.
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        # Clamp each beta at max_beta to avoid the singularity as alpha_bar -> 0.
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
| # Copyright 2023 Katherine Crowson, The HuggingFace Team and hlky. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """Build a discrete beta schedule from a continuous alpha-bar curve.

    The alpha-bar curve gives the cumulative product of (1 - beta) over
    t in [0, 1]; each discrete beta comes from the ratio of the curve at
    two consecutive grid points, clamped at ``max_beta``.

    Args:
        num_diffusion_timesteps: number of betas to produce.
        max_beta: upper clamp applied to every beta (values below 1 avoid
            singularities).
        alpha_transform_type: shape of the alpha-bar curve, ``"cosine"``
            or ``"exp"``.

    Returns:
        A float32 ``torch.Tensor`` of length ``num_diffusion_timesteps``.
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(u):
            return math.cos((u + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(u):
            return math.exp(u * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    # Evaluate the curve on a uniform grid and derive betas from the ratio
    # of consecutive samples.
    grid = [i / num_diffusion_timesteps for i in range(num_diffusion_timesteps + 1)]
    betas = [
        min(1 - alpha_bar_fn(hi) / alpha_bar_fn(lo), max_beta)
        for lo, hi in zip(grid[:-1], grid[1:])
    ]
    return torch.tensor(betas, dtype=torch.float32)
| class KDPM2AncestralDiscreteScheduler(SchedulerMixin, ConfigMixin): | 4 | 2023-12-28 08:17:40+00:00 | 12k |
FoundationVision/UniRef | projects/UniRef/uniref/models/segment_anything/build_sam.py | [
{
"identifier": "ImageEncoderViT",
"path": "projects/UniRef/uniref/models/segment_anything/modeling/image_encoder.py",
"snippet": "class ImageEncoderViT(nn.Module):\n def __init__(\n self,\n img_size: int = 1024,\n patch_size: int = 16,\n in_chans: int = 3,\n embed_... | from functools import partial
from .modeling import (ImageEncoderViT, MaskDecoder, PromptEncoder, Sam,
TwoWayTransformer)
import torch | 7,329 | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
def build_sam_vit_h(checkpoint=None):
    """Build a SAM model with the ViT-H (huge) image encoder.

    Args:
        checkpoint: optional path to a state-dict file to load weights from.
    """
    return _build_sam(
        encoder_embed_dim=1280,
        encoder_depth=32,
        encoder_num_heads=16,
        encoder_global_attn_indexes=[7, 15, 23, 31],  # layers that use global attention
        checkpoint=checkpoint,
    )


# Default builder: ViT-H is the canonical SAM configuration.
build_sam = build_sam_vit_h
def build_sam_vit_l(checkpoint=None):
    """Build a SAM model with the ViT-L (large) image encoder.

    Args:
        checkpoint: optional path to a state-dict file to load weights from.
    """
    return _build_sam(
        encoder_embed_dim=1024,
        encoder_depth=24,
        encoder_num_heads=16,
        encoder_global_attn_indexes=[5, 11, 17, 23],  # layers that use global attention
        checkpoint=checkpoint,
    )
def build_sam_vit_b(checkpoint=None):
    """Build a SAM model with the ViT-B (base) image encoder.

    Args:
        checkpoint: optional path to a state-dict file to load weights from.
    """
    return _build_sam(
        encoder_embed_dim=768,
        encoder_depth=12,
        encoder_num_heads=12,
        encoder_global_attn_indexes=[2, 5, 8, 11],  # layers that use global attention
        checkpoint=checkpoint,
    )
# Maps model-type strings to their builder functions; "default" is ViT-H.
sam_model_registry = {
    "default": build_sam_vit_h,
    "vit_h": build_sam_vit_h,
    "vit_l": build_sam_vit_l,
    "vit_b": build_sam_vit_b,
}
def _build_sam(
encoder_embed_dim,
encoder_depth,
encoder_num_heads,
encoder_global_attn_indexes,
checkpoint=None,
):
prompt_embed_dim = 256
image_size = 1024
vit_patch_size = 16
image_embedding_size = image_size // vit_patch_size
sam = Sam(
image_encoder=ImageEncoderViT(
depth=encoder_depth,
embed_dim=encoder_embed_dim,
img_size=image_size,
mlp_ratio=4,
norm_layer=partial(torch.nn.LayerNorm, eps=1e-6),
num_heads=encoder_num_heads,
patch_size=vit_patch_size,
qkv_bias=True,
use_rel_pos=True,
global_attn_indexes=encoder_global_attn_indexes,
window_size=14,
out_chans=prompt_embed_dim,
),
prompt_encoder=PromptEncoder(
embed_dim=prompt_embed_dim,
image_embedding_size=(image_embedding_size, image_embedding_size),
input_image_size=(image_size, image_size),
mask_in_chans=16,
),
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
def build_sam_vit_h(checkpoint=None):
    """Build a SAM model with the ViT-H (huge) image encoder.

    Args:
        checkpoint: optional path to a state-dict file to load weights from.
    """
    return _build_sam(
        encoder_embed_dim=1280,
        encoder_depth=32,
        encoder_num_heads=16,
        encoder_global_attn_indexes=[7, 15, 23, 31],  # layers that use global attention
        checkpoint=checkpoint,
    )


# Default builder: ViT-H is the canonical SAM configuration.
build_sam = build_sam_vit_h
def build_sam_vit_l(checkpoint=None):
    """Build a SAM model with the ViT-L (large) image encoder.

    Args:
        checkpoint: optional path to a state-dict file to load weights from.
    """
    return _build_sam(
        encoder_embed_dim=1024,
        encoder_depth=24,
        encoder_num_heads=16,
        encoder_global_attn_indexes=[5, 11, 17, 23],  # layers that use global attention
        checkpoint=checkpoint,
    )
def build_sam_vit_b(checkpoint=None):
    """Build a SAM model with the ViT-B (base) image encoder.

    Args:
        checkpoint: optional path to a state-dict file to load weights from.
    """
    return _build_sam(
        encoder_embed_dim=768,
        encoder_depth=12,
        encoder_num_heads=12,
        encoder_global_attn_indexes=[2, 5, 8, 11],  # layers that use global attention
        checkpoint=checkpoint,
    )
# Maps model-type strings to their builder functions; "default" is ViT-H.
sam_model_registry = {
    "default": build_sam_vit_h,
    "vit_h": build_sam_vit_h,
    "vit_l": build_sam_vit_l,
    "vit_b": build_sam_vit_b,
}
def _build_sam(
encoder_embed_dim,
encoder_depth,
encoder_num_heads,
encoder_global_attn_indexes,
checkpoint=None,
):
prompt_embed_dim = 256
image_size = 1024
vit_patch_size = 16
image_embedding_size = image_size // vit_patch_size
sam = Sam(
image_encoder=ImageEncoderViT(
depth=encoder_depth,
embed_dim=encoder_embed_dim,
img_size=image_size,
mlp_ratio=4,
norm_layer=partial(torch.nn.LayerNorm, eps=1e-6),
num_heads=encoder_num_heads,
patch_size=vit_patch_size,
qkv_bias=True,
use_rel_pos=True,
global_attn_indexes=encoder_global_attn_indexes,
window_size=14,
out_chans=prompt_embed_dim,
),
prompt_encoder=PromptEncoder(
embed_dim=prompt_embed_dim,
image_embedding_size=(image_embedding_size, image_embedding_size),
input_image_size=(image_size, image_size),
mask_in_chans=16,
), | mask_decoder=MaskDecoder( | 1 | 2023-12-22 13:31:33+00:00 | 12k |
xhuangcv/humannorm | threestudio/systems/base.py | [
{
"identifier": "Exporter",
"path": "threestudio/models/exporters/base.py",
"snippet": "class Exporter(BaseObject):\n @dataclass\n class Config(BaseObject.Config):\n save_video: bool = False\n\n cfg: Config\n\n def configure(\n self,\n geometry: BaseImplicitGeometry,\n ... | import os
import pytorch_lightning as pl
import torch.nn.functional as F
import threestudio
from dataclasses import dataclass, field
from threestudio.models.exporters.base import Exporter, ExporterOutput
from threestudio.systems.utils import parse_optimizer, parse_scheduler
from threestudio.utils.base import Updateable, update_if_possible
from threestudio.utils.config import parse_structured
from threestudio.utils.misc import C, cleanup, get_device, load_module_weights
from threestudio.utils.saving import SaverMixin
from threestudio.utils.typing import *
from threestudio.utils.config import load_config, parse_structured | 10,445 |
class BaseLift3DSystem(BaseSystem):
    """Base system for 2D-guidance-lifted 3D generation.

    Wires together the four submodules shared by all lifting pipelines --
    geometry, material, background, and renderer -- each looked up in the
    threestudio registry by its ``*_type`` string and configured by the
    matching dict. Optionally initializes geometry by converting it from a
    previous (coarse-stage) checkpoint.
    """

    @dataclass
    class Config(BaseSystem.Config):
        # Registry name + config dict for each submodule.
        geometry_type: str = ""
        geometry: dict = field(default_factory=dict)
        # Checkpoint path of a previous run whose geometry seeds this one.
        geometry_convert_from: Optional[str] = None
        geometry_convert_inherit_texture: bool = False
        # used to override configurations of the previous geometry being converted from,
        # for example isosurface_threshold
        geometry_convert_override: dict = field(default_factory=dict)
        material_type: str = ""
        material: dict = field(default_factory=dict)
        background_type: str = ""
        background: dict = field(default_factory=dict)
        renderer_type: str = ""
        renderer: dict = field(default_factory=dict)
        guidance_type: str = ""
        guidance: dict = field(default_factory=dict)
        # Optional secondary guidance / prompt processor ("_add" variants).
        guidance_type_add: str = ""
        guidance_add: dict = field(default_factory=dict)
        prompt_processor_type: str = ""
        prompt_processor: dict = field(default_factory=dict)
        prompt_processor_type_add: str = ""
        prompt_processor_add: dict = field(default_factory=dict)
        # geometry export configurations, no need to specify in training
        exporter_type: str = "mesh-exporter"
        exporter: dict = field(default_factory=dict)

    cfg: Config

    def configure(self) -> None:
        """Instantiate geometry/material/background/renderer from the config.

        If ``geometry_convert_from`` is set -- and we are neither loading
        explicit weights nor resuming from a checkpoint -- the geometry is
        built by converting the geometry of that previous run.
        """
        if (
            self.cfg.geometry_convert_from  # from_coarse must be specified
            and not self.cfg.weights  # not initialized from coarse when weights are specified
            and not self.resumed  # not initialized from coarse when resumed from checkpoints
        ):
            threestudio.info("Initializing geometry from a given checkpoint ...")
            prev_cfg = load_config(
                os.path.join(
                    os.path.dirname(self.cfg.geometry_convert_from),
                    "../configs/parsed.yaml",
                )
            )  # TODO: hard-coded relative path
            prev_system_cfg: BaseLift3DSystem.Config = parse_structured(
                self.Config, prev_cfg.system
            )
            prev_geometry_cfg = prev_system_cfg.geometry
            prev_geometry_cfg.update(self.cfg.geometry_convert_override)
            prev_geometry = threestudio.find(prev_system_cfg.geometry_type)(
                prev_geometry_cfg
            )
            state_dict, epoch, global_step = load_module_weights(
                self.cfg.geometry_convert_from,
                module_name="geometry",
                map_location="cpu",
            )
            prev_geometry.load_state_dict(state_dict, strict=False)
            # restore step-dependent states
            prev_geometry.do_update_step(epoch, global_step, on_load_weights=True)
            # convert from coarse stage geometry
            prev_geometry = prev_geometry.to(get_device())
            self.geometry = threestudio.find(self.cfg.geometry_type).create_from(
                prev_geometry,
                self.cfg.geometry,
                copy_net=self.cfg.geometry_convert_inherit_texture,
            )
            # Free the coarse geometry immediately to reclaim GPU memory.
            del prev_geometry
            cleanup()
        else:
            self.geometry = threestudio.find(self.cfg.geometry_type)(self.cfg.geometry)
        self.material = threestudio.find(self.cfg.material_type)(self.cfg.material)
        self.background = threestudio.find(self.cfg.background_type)(
            self.cfg.background
        )
        self.renderer = threestudio.find(self.cfg.renderer_type)(
            self.cfg.renderer,
            geometry=self.geometry,
            material=self.material,
            background=self.background,
        )

    def on_fit_start(self) -> None:
        # Warn early if no save directory was configured for this run.
        if self._save_dir is not None:
            threestudio.info(f"Validation results will be saved to {self._save_dir}")
        else:
            threestudio.warn(
                f"Saving directory not set for the system, visualization results will not be saved"
            )

    def on_test_end(self) -> None:
        if self._save_dir is not None:
            threestudio.info(f"Test results saved to {self._save_dir}")

    def on_predict_start(self) -> None:
        # Prediction is used for export: build the exporter lazily here.
        self.exporter: Exporter = threestudio.find(self.cfg.exporter_type)(
            self.cfg.exporter,
            geometry=self.geometry,
            material=self.material,
            background=self.background,
        )

    def predict_step(self, batch, batch_idx):
        # When exporting a video, reuse the test-step rendering path.
        if self.exporter.cfg.save_video:
            self.test_step(batch, batch_idx)

    def on_predict_epoch_end(self) -> None:
        # Reuse the test-epoch aggregation (video saving) when requested.
        if self.exporter.cfg.save_video:
            self.on_test_epoch_end()
|
class BaseSystem(pl.LightningModule, Updateable, SaverMixin):
    """Base LightningModule for all threestudio systems.

    Handles config parsing, optional weight loading, resume bookkeeping
    (so epoch/step-dependent schedules stay correct when evaluating a
    resumed run), optimizer/scheduler construction, and per-batch hooks
    that keep the dataset and this module's step-dependent state updated.
    """

    @dataclass
    class Config:
        loggers: dict = field(default_factory=dict)
        loss: dict = field(default_factory=dict)
        optimizer: dict = field(default_factory=dict)
        scheduler: Optional[dict] = None
        # Optional checkpoint to initialize from, and modules to skip.
        weights: Optional[str] = None
        weights_ignore_modules: Optional[List[str]] = None
        # Free GPU memory after each val/test step (for memory-heavy runs).
        cleanup_after_validation_step: bool = False
        cleanup_after_test_step: bool = False

    cfg: Config

    def __init__(self, cfg, resumed=False) -> None:
        super().__init__()
        self.cfg = parse_structured(self.Config, cfg)
        self._save_dir: Optional[str] = None
        self._resumed: bool = resumed
        self._resumed_eval: bool = False
        self._resumed_eval_status: dict = {"global_step": 0, "current_epoch": 0}
        if "loggers" in cfg:
            self.create_loggers(cfg.loggers)
        self.configure()
        if self.cfg.weights is not None:
            self.load_weights(self.cfg.weights, self.cfg.weights_ignore_modules)
        self.post_configure()

    def load_weights(self, weights: str, ignore_modules: Optional[List[str]] = None):
        """Load a checkpoint (non-strict) and replay its step state."""
        state_dict, epoch, global_step = load_module_weights(
            weights, ignore_modules=ignore_modules, map_location="cpu"
        )
        self.load_state_dict(state_dict, strict=False)
        # restore step-dependent states
        self.do_update_step(epoch, global_step, on_load_weights=True)

    def set_resume_status(self, current_epoch: int, global_step: int):
        # restore correct epoch and global step in eval
        self._resumed_eval = True
        self._resumed_eval_status["current_epoch"] = current_epoch
        self._resumed_eval_status["global_step"] = global_step

    @property
    def resumed(self):
        # whether from resumed checkpoint
        return self._resumed

    @property
    def true_global_step(self):
        # Global step as the schedules should see it (checkpoint-aware).
        if self._resumed_eval:
            return self._resumed_eval_status["global_step"]
        else:
            return self.global_step

    @property
    def true_current_epoch(self):
        # Epoch as the schedules should see it (checkpoint-aware).
        if self._resumed_eval:
            return self._resumed_eval_status["current_epoch"]
        else:
            return self.current_epoch

    def configure(self) -> None:
        # Subclasses build their submodules here (called before weights load).
        pass

    def post_configure(self) -> None:
        """
        executed after weights are loaded
        """
        pass

    def C(self, value: Any) -> float:
        # Resolve a possibly step/epoch-scheduled config value to a float.
        return C(value, self.true_current_epoch, self.true_global_step)

    def configure_optimizers(self):
        optim = parse_optimizer(self.cfg.optimizer, self)
        ret = {
            "optimizer": optim,
        }
        if self.cfg.scheduler is not None:
            ret.update(
                {
                    "lr_scheduler": parse_scheduler(self.cfg.scheduler, optim),
                }
            )
        return ret

    def training_step(self, batch, batch_idx):
        raise NotImplementedError

    def validation_step(self, batch, batch_idx):
        raise NotImplementedError

    def on_validation_batch_end(self, outputs, batch, batch_idx):
        if self.cfg.cleanup_after_validation_step:
            # cleanup to save vram
            cleanup()

    def on_validation_epoch_end(self):
        raise NotImplementedError

    def test_step(self, batch, batch_idx):
        raise NotImplementedError

    def on_test_batch_end(self, outputs, batch, batch_idx):
        if self.cfg.cleanup_after_test_step:
            # cleanup to save vram
            cleanup()

    def on_test_epoch_end(self):
        pass

    def predict_step(self, batch, batch_idx):
        raise NotImplementedError

    def on_predict_batch_end(self, outputs, batch, batch_idx):
        if self.cfg.cleanup_after_test_step:
            # cleanup to save vram
            cleanup()

    def on_predict_epoch_end(self):
        pass

    def preprocess_data(self, batch, stage):
        # Hook for subclasses to mutate the batch before each step.
        pass

    """
    Implementing on_after_batch_transfer of DataModule does the same.
    But on_after_batch_transfer does not support DP.
    """

    def on_train_batch_start(self, batch, batch_idx, unused=0):
        self.preprocess_data(batch, "train")
        self.dataset = self.trainer.train_dataloader.dataset
        update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step)
        self.do_update_step(self.true_current_epoch, self.true_global_step)

    def on_validation_batch_start(self, batch, batch_idx, dataloader_idx=0):
        self.preprocess_data(batch, "validation")
        self.dataset = self.trainer.val_dataloaders.dataset
        update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step)
        self.do_update_step(self.true_current_epoch, self.true_global_step)

    def on_test_batch_start(self, batch, batch_idx, dataloader_idx=0):
        self.preprocess_data(batch, "test")
        self.dataset = self.trainer.test_dataloaders.dataset
        update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step)
        self.do_update_step(self.true_current_epoch, self.true_global_step)

    def on_predict_batch_start(self, batch, batch_idx, dataloader_idx=0):
        self.preprocess_data(batch, "predict")
        self.dataset = self.trainer.predict_dataloaders.dataset
        update_if_possible(self.dataset, self.true_current_epoch, self.true_global_step)
        self.do_update_step(self.true_current_epoch, self.true_global_step)

    def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):
        # Per-step update hook from Updateable; subclasses may override.
        pass

    def on_before_optimizer_step(self, optimizer):
        """
        # some gradient-related debugging goes here, example:
        from lightning.pytorch.utilities import grad_norm
        norms = grad_norm(self.geometry, norm_type=2)
        print(norms)
        """
        pass
class BaseLift3DSystem(BaseSystem):
    """Base system for 2D-guidance-lifted 3D generation.

    Wires together the four submodules shared by all lifting pipelines --
    geometry, material, background, and renderer -- each looked up in the
    threestudio registry by its ``*_type`` string and configured by the
    matching dict. Optionally initializes geometry by converting it from a
    previous (coarse-stage) checkpoint.
    """

    @dataclass
    class Config(BaseSystem.Config):
        # Registry name + config dict for each submodule.
        geometry_type: str = ""
        geometry: dict = field(default_factory=dict)
        # Checkpoint path of a previous run whose geometry seeds this one.
        geometry_convert_from: Optional[str] = None
        geometry_convert_inherit_texture: bool = False
        # used to override configurations of the previous geometry being converted from,
        # for example isosurface_threshold
        geometry_convert_override: dict = field(default_factory=dict)
        material_type: str = ""
        material: dict = field(default_factory=dict)
        background_type: str = ""
        background: dict = field(default_factory=dict)
        renderer_type: str = ""
        renderer: dict = field(default_factory=dict)
        guidance_type: str = ""
        guidance: dict = field(default_factory=dict)
        # Optional secondary guidance / prompt processor ("_add" variants).
        guidance_type_add: str = ""
        guidance_add: dict = field(default_factory=dict)
        prompt_processor_type: str = ""
        prompt_processor: dict = field(default_factory=dict)
        prompt_processor_type_add: str = ""
        prompt_processor_add: dict = field(default_factory=dict)
        # geometry export configurations, no need to specify in training
        exporter_type: str = "mesh-exporter"
        exporter: dict = field(default_factory=dict)

    cfg: Config

    def configure(self) -> None:
        """Instantiate geometry/material/background/renderer from the config.

        If ``geometry_convert_from`` is set -- and we are neither loading
        explicit weights nor resuming from a checkpoint -- the geometry is
        built by converting the geometry of that previous run.
        """
        if (
            self.cfg.geometry_convert_from  # from_coarse must be specified
            and not self.cfg.weights  # not initialized from coarse when weights are specified
            and not self.resumed  # not initialized from coarse when resumed from checkpoints
        ):
            threestudio.info("Initializing geometry from a given checkpoint ...")
            prev_cfg = load_config(
                os.path.join(
                    os.path.dirname(self.cfg.geometry_convert_from),
                    "../configs/parsed.yaml",
                )
            )  # TODO: hard-coded relative path
            prev_system_cfg: BaseLift3DSystem.Config = parse_structured(
                self.Config, prev_cfg.system
            )
            prev_geometry_cfg = prev_system_cfg.geometry
            prev_geometry_cfg.update(self.cfg.geometry_convert_override)
            prev_geometry = threestudio.find(prev_system_cfg.geometry_type)(
                prev_geometry_cfg
            )
            state_dict, epoch, global_step = load_module_weights(
                self.cfg.geometry_convert_from,
                module_name="geometry",
                map_location="cpu",
            )
            prev_geometry.load_state_dict(state_dict, strict=False)
            # restore step-dependent states
            prev_geometry.do_update_step(epoch, global_step, on_load_weights=True)
            # convert from coarse stage geometry
            prev_geometry = prev_geometry.to(get_device())
            self.geometry = threestudio.find(self.cfg.geometry_type).create_from(
                prev_geometry,
                self.cfg.geometry,
                copy_net=self.cfg.geometry_convert_inherit_texture,
            )
            # Free the coarse geometry immediately to reclaim GPU memory.
            del prev_geometry
            cleanup()
        else:
            self.geometry = threestudio.find(self.cfg.geometry_type)(self.cfg.geometry)
        self.material = threestudio.find(self.cfg.material_type)(self.cfg.material)
        self.background = threestudio.find(self.cfg.background_type)(
            self.cfg.background
        )
        self.renderer = threestudio.find(self.cfg.renderer_type)(
            self.cfg.renderer,
            geometry=self.geometry,
            material=self.material,
            background=self.background,
        )

    def on_fit_start(self) -> None:
        # Warn early if no save directory was configured for this run.
        if self._save_dir is not None:
            threestudio.info(f"Validation results will be saved to {self._save_dir}")
        else:
            threestudio.warn(
                f"Saving directory not set for the system, visualization results will not be saved"
            )

    def on_test_end(self) -> None:
        if self._save_dir is not None:
            threestudio.info(f"Test results saved to {self._save_dir}")

    def on_predict_start(self) -> None:
        # Prediction is used for export: build the exporter lazily here.
        self.exporter: Exporter = threestudio.find(self.cfg.exporter_type)(
            self.cfg.exporter,
            geometry=self.geometry,
            material=self.material,
            background=self.background,
        )

    def predict_step(self, batch, batch_idx):
        # When exporting a video, reuse the test-step rendering path.
        if self.exporter.cfg.save_video:
            self.test_step(batch, batch_idx)

    def on_predict_epoch_end(self) -> None:
        # Reuse the test-epoch aggregation (video saving) when requested.
        if self.exporter.cfg.save_video:
            self.on_test_epoch_end()
jesenzhang/ComfyUI_StreamDiffusion | streamdiffusion/wrapper.py | [
{
"identifier": "StreamDiffusion",
"path": "streamdiffusion/pipeline.py",
"snippet": "class StreamDiffusion:\n def __init__(\n self,\n pipe: StableDiffusionPipeline,\n t_index_list: List[int],\n torch_dtype: torch.dtype = torch.float16,\n width: int = 512,\n ... | import gc
import os
import traceback
import numpy as np
import torch
from pathlib import Path
from typing import List, Literal, Optional, Union, Dict
from diffusers import AutoencoderTiny, StableDiffusionPipeline
from PIL import Image
from .pipeline import StreamDiffusion
from .image_utils import postprocess_image
from transformers import CLIPFeatureExtractor
from diffusers.pipelines.stable_diffusion.safety_checker import (
StableDiffusionSafetyChecker,
)
from polygraphy import cuda
from streamdiffusion.acceleration.tensorrt import (
TorchVAEEncoder,
compile_unet,
compile_vae_decoder,
compile_vae_encoder,
)
from streamdiffusion.acceleration.tensorrt.engine import (
AutoencoderKLEngine,
UNet2DConditionModelEngine,
)
from streamdiffusion.acceleration.tensorrt.models import (
VAE,
UNet,
VAEEncoder,
)
from streamdiffusion.acceleration.sfast import (
accelerate_with_stable_fast,
)
from transformers import CLIPFeatureExtractor
from diffusers.pipelines.stable_diffusion.safety_checker import (
StableDiffusionSafetyChecker,
) | 8,713 | """
# self.stream.prepare(
# prompt,
# negative_prompt,
# num_inference_steps=num_inference_steps,
# guidance_scale=guidance_scale,
# delta=delta,
# )
self.prompt =prompt
self.negative_prompt=negative_prompt
self.num_inference_steps=num_inference_steps
self.guidance_scale=guidance_scale
self.delta=delta
self.frame_buffer_size = frame_buffer_size
self.batch_size = (
len(t_index_list) * frame_buffer_size
if use_denoising_batch
else frame_buffer_size
)
self.t_index_list =t_index_list
self.cfg_type =cfg_type
self.use_denoising_batch = use_denoising_batch
self.use_safety_checker = use_safety_checker
self.do_add_noise =do_add_noise
self.seed=seed
if enable_similar_image_filter:
self.stream.enable_similar_image_filter(similar_image_filter_threshold, similar_image_filter_max_skip_frame)
else:
self.stream.disable_similar_image_filter()
if self.use_safety_checker:
if self.safety_checker==None or self.feature_extractor==None:
self.safety_checker = StableDiffusionSafetyChecker.from_pretrained(
"CompVis/stable-diffusion-safety-checker"
).to(self.stream.device)
self.feature_extractor = CLIPFeatureExtractor.from_pretrained(
"openai/clip-vit-base-patch32"
)
self.nsfw_fallback_img = Image.new("RGB", (512, 512), (0, 0, 0))
def __call__(
self,
image: Optional[Union[str, Image.Image, torch.Tensor]] = None,
prompt: Optional[str] = None,
) -> Union[Image.Image, List[Image.Image]]:
"""
Performs img2img or txt2img based on the mode.
Parameters
----------
image : Optional[Union[str, Image.Image, torch.Tensor]]
The image to generate from.
prompt : Optional[str]
The prompt to generate images from.
Returns
-------
Union[Image.Image, List[Image.Image]]
The generated image.
"""
if self.mode == "img2img":
return self.img2img(image)
else:
return self.txt2img(prompt)
def sample(self, image: Optional[Union[str, Image.Image, torch.Tensor]] = None,
prompt: Optional[str] = None,negative_prompt: Optional[str] = None)-> List[Image.Image]:
use_denoising_batch=self.use_denoising_batch
if not image == None:
#图生图
if isinstance(image, str) or isinstance(image, Image.Image):
image = self.preprocess_image(image)
use_denoising_batch = True
self.stream.set_sampler_param(t_index_list=self.t_index_list,
width=self.width,
height=self.height,
do_add_noise=self.do_add_noise,
use_denoising_batch=use_denoising_batch,
frame_buffer_size=self.frame_buffer_size,
cfg_type=self.cfg_type)
else:
#文生图
if self.frame_buffer_size >1 and self.use_denoising_batch:
use_denoising_batch = False
self.stream.set_sampler_param(t_index_list=self.t_index_list,
width=self.width,
height=self.height,
do_add_noise=self.do_add_noise,
use_denoising_batch=use_denoising_batch,
frame_buffer_size=self.frame_buffer_size,
cfg_type='none')
self.stream.prepare(
prompt=self.prompt,
negative_prompt=self.negative_prompt,
num_inference_steps=self.num_inference_steps,
guidance_scale=self.guidance_scale,
delta=self.delta,
seed=self.seed,
)
if prompt is not None:
self.stream.update_prompt(prompt,negative_prompt)
self.batch_size = (
len(self.t_index_list) * self.frame_buffer_size
if use_denoising_batch
else self.frame_buffer_size
)
if self.frame_buffer_size==1:
for _ in range(self.batch_size):
self.stream.sample(image)
image_tensor = self.stream.sample(image)
|
# Inference-only module: disable autograd globally, and allow TF32 matmuls
# in cuBLAS/cuDNN for faster float32 math on GPUs that support it.
torch.set_grad_enabled(False)
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
class StreamDiffusionWrapper:
    def __init__(
        self,
        model_id_or_path: str,
        t_index_list: List[int],
        lora_dict: Optional[Dict[str, float]] = None,
        mode: Literal["img2img", "txt2img"] = "img2img",
        output_type: Literal["pil", "pt", "np", "latent"] = "pil",
        lcm_lora_id: Optional[str] = None,
        vae_id: Optional[str] = None,
        device: Literal["cpu", "cuda"] = "cuda",
        dtype: torch.dtype = torch.float16,
        frame_buffer_size: int = 1,
        width: int = 512,
        height: int = 512,
        warmup: int = 10,
        acceleration: Literal["none", "xformers", "tensorrt"] = "tensorrt",
        do_add_noise: bool = True,
        device_ids: Optional[List[int]] = None,
        use_lcm_lora: bool = True,
        use_tiny_vae: bool = True,
        enable_similar_image_filter: bool = False,
        similar_image_filter_threshold: float = 0.98,
        similar_image_filter_max_skip_frame: int = 10,
        use_denoising_batch: bool = True,
        cfg_type: Literal["none", "full", "self", "initialize"] = "self",
        seed: int = 2,
        use_safety_checker: bool = False,
    ):
        """
        Initializes the StreamDiffusionWrapper.

        Parameters
        ----------
        model_id_or_path : str
            The model id or path to load.
        t_index_list : List[int]
            The t_index_list to use for inference.
        lora_dict : Optional[Dict[str, float]], optional
            The lora_dict to load, by default None.
            Keys are the LoRA names and values are the LoRA scales.
            Example: {'LoRA_1' : 0.5 , 'LoRA_2' : 0.7 ,...}
        mode : Literal["img2img", "txt2img"], optional
            txt2img or img2img, by default "img2img".
        output_type : Literal["pil", "pt", "np", "latent"], optional
            The output type of image, by default "pil".
        lcm_lora_id : Optional[str], optional
            The lcm_lora_id to load, by default None.
            If None, the default LCM-LoRA
            ("latent-consistency/lcm-lora-sdv1-5") will be used.
        vae_id : Optional[str], optional
            The vae_id to load, by default None.
            If None, the default TinyVAE
            ("madebyollin/taesd") will be used.
        device : Literal["cpu", "cuda"], optional
            The device to use for inference, by default "cuda".
        dtype : torch.dtype, optional
            The dtype for inference, by default torch.float16.
        frame_buffer_size : int, optional
            The frame buffer size for denoising batch, by default 1.
        width : int, optional
            The width of the image, by default 512.
        height : int, optional
            The height of the image, by default 512.
        warmup : int, optional
            The number of warmup steps to perform, by default 10.
        acceleration : Literal["none", "xformers", "tensorrt"], optional
            The acceleration method, by default "tensorrt".
        do_add_noise : bool, optional
            Whether to add noise for following denoising steps or not,
            by default True.
        device_ids : Optional[List[int]], optional
            The device ids to use for DataParallel, by default None.
        use_lcm_lora : bool, optional
            Whether to use LCM-LoRA or not, by default True.
        use_tiny_vae : bool, optional
            Whether to use TinyVAE or not, by default True.
        enable_similar_image_filter : bool, optional
            Whether to enable similar image filter or not,
            by default False.
        similar_image_filter_threshold : float, optional
            The threshold for similar image filter, by default 0.98.
        similar_image_filter_max_skip_frame : int, optional
            The max skip frame for similar image filter, by default 10.
        use_denoising_batch : bool, optional
            Whether to use denoising batch or not, by default True.
        cfg_type : Literal["none", "full", "self", "initialize"],
            optional
            The cfg_type for img2img mode, by default "self".
            You cannot use anything other than "none" for txt2img mode.
        seed : int, optional
            The seed, by default 2.
        use_safety_checker : bool, optional
            Whether to use safety checker or not, by default False.
        """
        # Heuristic: SD-Turbo checkpoints are detected from the model name.
        self.sd_turbo = "turbo" in model_id_or_path

        # Validate mode-specific constraints up front.
        if mode == "txt2img":
            if cfg_type != "none":
                raise ValueError(
                    f"txt2img mode accepts only cfg_type = 'none', but got {cfg_type}"
                )
            if use_denoising_batch and frame_buffer_size > 1:
                if not self.sd_turbo:
                    raise ValueError(
                        "txt2img mode cannot use denoising batch with frame_buffer_size > 1."
                    )

        if mode == "img2img":
            if not use_denoising_batch:
                raise NotImplementedError(
                    "img2img mode must use denoising batch for now."
                )

        self.device = device
        self.dtype = dtype
        self.width = width
        self.height = height
        self.mode = mode
        self.output_type = output_type
        self.frame_buffer_size = frame_buffer_size
        # With denoising batching, all timesteps of all buffered frames are
        # denoised in one batched UNet call.
        self.batch_size = (
            len(t_index_list) * frame_buffer_size
            if use_denoising_batch
            else frame_buffer_size
        )
        self.t_index_list =t_index_list
        self.cfg_type =cfg_type
        self.use_denoising_batch = use_denoising_batch
        self.use_safety_checker = use_safety_checker
        self.do_add_noise =do_add_noise
        self.seed=seed

        # Build (and optionally accelerate) the underlying stream pipeline.
        self.stream: StreamDiffusion = self._load_model(
            model_id_or_path=model_id_or_path,
            lora_dict=lora_dict,
            lcm_lora_id=lcm_lora_id,
            vae_id=vae_id,
            t_index_list=t_index_list,
            acceleration=acceleration,
            warmup=warmup,
            do_add_noise=do_add_noise,
            use_lcm_lora=use_lcm_lora,
            use_tiny_vae=use_tiny_vae,
            cfg_type=cfg_type,
            seed=seed,
        )

        if device_ids is not None:
            self.stream.unet = torch.nn.DataParallel(
                self.stream.unet, device_ids=device_ids
            )

        if enable_similar_image_filter:
            self.stream.enable_similar_image_filter(similar_image_filter_threshold, similar_image_filter_max_skip_frame)
def prepare(
self,
prompt: str,
negative_prompt: str = "",
num_inference_steps: int = 50,
guidance_scale: float = 1.2,
delta: float = 1.0,
t_index_list: List[int]=[16,32,45],
do_add_noise: bool = True,
enable_similar_image_filter: bool = False,
similar_image_filter_threshold: float = 0.98,
similar_image_filter_max_skip_frame: int = 10,
use_denoising_batch: bool = True,
cfg_type: Literal["none", "full", "self", "initialize"] = "self",
seed: int = 2,
frame_buffer_size:int=1,
use_safety_checker: bool = False,
) -> None:
"""
Prepares the model for inference.
Parameters
----------
prompt : str
The prompt to generate images from.
num_inference_steps : int, optional
The number of inference steps to perform, by default 50.
guidance_scale : float, optional
The guidance scale to use, by default 1.2.
delta : float, optional
The delta multiplier of virtual residual noise,
by default 1.0.
"""
# self.stream.prepare(
# prompt,
# negative_prompt,
# num_inference_steps=num_inference_steps,
# guidance_scale=guidance_scale,
# delta=delta,
# )
self.prompt =prompt
self.negative_prompt=negative_prompt
self.num_inference_steps=num_inference_steps
self.guidance_scale=guidance_scale
self.delta=delta
self.frame_buffer_size = frame_buffer_size
self.batch_size = (
len(t_index_list) * frame_buffer_size
if use_denoising_batch
else frame_buffer_size
)
self.t_index_list =t_index_list
self.cfg_type =cfg_type
self.use_denoising_batch = use_denoising_batch
self.use_safety_checker = use_safety_checker
self.do_add_noise =do_add_noise
self.seed=seed
if enable_similar_image_filter:
self.stream.enable_similar_image_filter(similar_image_filter_threshold, similar_image_filter_max_skip_frame)
else:
self.stream.disable_similar_image_filter()
if self.use_safety_checker:
if self.safety_checker==None or self.feature_extractor==None:
self.safety_checker = StableDiffusionSafetyChecker.from_pretrained(
"CompVis/stable-diffusion-safety-checker"
).to(self.stream.device)
self.feature_extractor = CLIPFeatureExtractor.from_pretrained(
"openai/clip-vit-base-patch32"
)
self.nsfw_fallback_img = Image.new("RGB", (512, 512), (0, 0, 0))
    def __call__(
        self,
        image: Optional[Union[str, Image.Image, torch.Tensor]] = None,
        prompt: Optional[str] = None,
    ) -> Union[Image.Image, List[Image.Image]]:
        """
        Performs img2img or txt2img based on the mode.

        In "img2img" mode only `image` is used; in "txt2img" mode only
        `prompt` is used -- the other argument is ignored.

        Parameters
        ----------
        image : Optional[Union[str, Image.Image, torch.Tensor]]
            The image to generate from.
        prompt : Optional[str]
            The prompt to generate images from.

        Returns
        -------
        Union[Image.Image, List[Image.Image]]
            The generated image.
        """
        if self.mode == "img2img":
            return self.img2img(image)
        else:
            return self.txt2img(prompt)
def sample(self, image: Optional[Union[str, Image.Image, torch.Tensor]] = None,
prompt: Optional[str] = None,negative_prompt: Optional[str] = None)-> List[Image.Image]:
use_denoising_batch=self.use_denoising_batch
if not image == None:
#图生图
if isinstance(image, str) or isinstance(image, Image.Image):
image = self.preprocess_image(image)
use_denoising_batch = True
self.stream.set_sampler_param(t_index_list=self.t_index_list,
width=self.width,
height=self.height,
do_add_noise=self.do_add_noise,
use_denoising_batch=use_denoising_batch,
frame_buffer_size=self.frame_buffer_size,
cfg_type=self.cfg_type)
else:
#文生图
if self.frame_buffer_size >1 and self.use_denoising_batch:
use_denoising_batch = False
self.stream.set_sampler_param(t_index_list=self.t_index_list,
width=self.width,
height=self.height,
do_add_noise=self.do_add_noise,
use_denoising_batch=use_denoising_batch,
frame_buffer_size=self.frame_buffer_size,
cfg_type='none')
self.stream.prepare(
prompt=self.prompt,
negative_prompt=self.negative_prompt,
num_inference_steps=self.num_inference_steps,
guidance_scale=self.guidance_scale,
delta=self.delta,
seed=self.seed,
)
if prompt is not None:
self.stream.update_prompt(prompt,negative_prompt)
self.batch_size = (
len(self.t_index_list) * self.frame_buffer_size
if use_denoising_batch
else self.frame_buffer_size
)
if self.frame_buffer_size==1:
for _ in range(self.batch_size):
self.stream.sample(image)
image_tensor = self.stream.sample(image) | image = postprocess_image(image_tensor.cpu(), output_type=self.output_type) | 1 | 2023-12-29 09:00:03+00:00 | 12k |
neobundy/MLX-Stable-Diffusion-WebUI | stable_diffusion/model_io.py | [
{
"identifier": "CLIPTextModel",
"path": "stable_diffusion/clip.py",
"snippet": "class CLIPTextModel(nn.Module):\n \"\"\"Implements the text encoder transformer from CLIP.\"\"\"\n\n def __init__(self, config: CLIPTextModelConfig):\n super().__init__()\n\n self.token_embedding = nn.Em... | from typing import Optional
from functools import partial
from huggingface_hub import hf_hub_download
from mlx.utils import tree_unflatten
from safetensors import safe_open as safetensor_open
from .clip import CLIPTextModel
from .config import AutoencoderConfig, CLIPTextModelConfig, DiffusionConfig, UNetConfig
from .tokenizer import Tokenizer
from .unet import UNetModel
from .vae import Autoencoder
from .models import _DEFAULT_MODEL, _MODELS
from .config import DiffuserModelPathConfig
from tqdm import tqdm
import json
import mlx.core as mx
import numpy as np | 7,393 | key = key.replace("mid_block.resnets.0", "mid_blocks.0")
_debug_print(f"Replaced 'mid_block.resnets.0' with 'mid_blocks.0' in {key}")
if "mid_block.attentions.0" in key:
key = key.replace("mid_block.attentions.0", "mid_blocks.1")
_debug_print(f"Replaced 'mid_block.attentions.0' with 'mid_blocks.1' in {key}")
if "mid_block.resnets.1" in key:
key = key.replace("mid_block.resnets.1", "mid_blocks.2")
_debug_print(f"Replaced 'mid_block.resnets.1' with 'mid_blocks.2' in {key}")
# Map the quant/post_quant layers
if "quant_conv" in key:
key = key.replace("quant_conv", "quant_proj")
value = value.squeeze()
_debug_print(f"Replaced 'quant_conv' with 'quant_proj' and squeezed value in {key}")
# Map the conv_shortcut to linear
if "conv_shortcut.weight" in key:
value = value.squeeze()
_debug_print(f"Squeezed 'conv_shortcut.weight' in {key}")
# Rearrange the dimensions to [B, H, W, C] - Autoencoder expects: B, H, W, C = x.shape
if len(value.shape) == 4:
value = value.transpose(0, 2, 3, 1)
_debug_print(f"Transposed dimensions in {key}")
return [(key, _from_numpy(value))]
def _flatten(params):
return [(k, v) for p in params for (k, v) in p]
# The weights of the model can be loaded as 16-bit floating point numbers, which is a form of quantization known as half-precision floating point.
# This can reduce the memory requirements of the model by half compared to 32-bit floating point numbers, at the cost of reduced numerical precision.
def _load_safetensor_weights(mapper, model, weight_file, float16: bool = False):
    """Read a safetensors checkpoint, remap every tensor through ``mapper``,
    and update ``model`` in place.

    Loading as float16 halves the checkpoint's memory footprint at the cost
    of numerical precision.
    """
    dtype = np.float16 if float16 else np.float32
    _debug_print(f"Loading weights from {weight_file}")
    with safetensor_open(weight_file, framework="numpy") as f:
        mapped = [
            mapper(k, f.get_tensor(k).astype(dtype))
            for k in tqdm(list(f.keys()), desc=f"Loading weights from {weight_file}...")
        ]
        weights = _flatten(mapped)
    model.update(tree_unflatten(weights))
def _check_key(key: str, part: str):
    """Raise ValueError when ``key`` does not name a known model."""
    if key not in _MODELS:
        choices = ','.join(_MODELS.keys())
        raise ValueError(
            f"[{part}] '{key}' model not found, choose one of {{{choices}}}"
        )
def load_unet(key: str = _DEFAULT_MODEL, float16: bool = False):
    """Load the stable diffusion UNet from Hugging Face Hub.

    Args:
        key: Hub repo id; must be one of the known ``_MODELS`` entries.
        float16: if True, load the weights in half precision.

    Returns:
        A ``UNetModel`` populated with the pretrained weights.
    """
    _check_key(key, "load_unet")
    # Download the config and create the model
    unet_config = _MODELS[key]["unet_config"]
    with open(hf_hub_download(key, unet_config)) as f:
        config = json.load(f)
    n_blocks = len(config["block_out_channels"])
    model = UNetModel(
        UNetConfig(
            in_channels=config["in_channels"],
            out_channels=config["out_channels"],
            block_out_channels=config["block_out_channels"],
            layers_per_block=[config["layers_per_block"]] * n_blocks,
            # A scalar attention_head_dim applies to every block; a list is
            # taken as per-block values.
            num_attention_heads=[config["attention_head_dim"]] * n_blocks
            if isinstance(config["attention_head_dim"], int)
            else config["attention_head_dim"],
            cross_attention_dim=[config["cross_attention_dim"]] * n_blocks,
            norm_num_groups=config["norm_num_groups"],
        )
    )
    # Download the weights and map them into the model
    unet_weights = _MODELS[key]["unet"]
    weight_file = hf_hub_download(key, unet_weights)
    _load_safetensor_weights(map_unet_weights, model, weight_file, float16)
    return model
def load_text_encoder(key: str = _DEFAULT_MODEL, float16: bool = False):
    """Download and build the stable diffusion CLIP text encoder from the
    Hugging Face Hub, returning it with pretrained weights loaded."""
    _check_key(key, "load_text_encoder")
    # Build the model from its downloaded configuration file.
    config_path = hf_hub_download(key, _MODELS[key]["text_encoder_config"])
    with open(config_path) as f:
        config = json.load(f)
    model_config = CLIPTextModelConfig(
        num_layers=config["num_hidden_layers"],
        model_dims=config["hidden_size"],
        num_heads=config["num_attention_heads"],
        max_length=config["max_position_embeddings"],
        vocab_size=config["vocab_size"],
    )
    model = CLIPTextModel(model_config)
    # Fetch the checkpoint and remap its tensors into the model.
    weight_file = hf_hub_download(key, _MODELS[key]["text_encoder"])
    _load_safetensor_weights(map_clip_text_encoder_weights, model, weight_file, float16)
    return model
def load_autoencoder(key: str = _DEFAULT_MODEL, float16: bool = False):
"""Load the stable diffusion autoencoder from Hugging Face Hub."""
_check_key(key, "load_autoencoder")
# Download the config and create the model
vae_config = _MODELS[key]["vae_config"]
with open(hf_hub_download(key, vae_config)) as f:
config = json.load(f)
| # Copyright © 2023 Apple Inc.
logfile = 'log.txt'
_DEBUG = False
def _debug_print(*args, **kwargs):
    """Echo a debug message to stdout and append it to the log file.

    No-op unless the module-level ``_DEBUG`` flag is set.
    """
    if not _DEBUG:
        return
    message = ' '.join(str(arg) for arg in args)
    print(message, **kwargs)
    # Keep a persistent copy of every debug line for later inspection.
    with open(logfile, 'a') as log:
        log.write(message + '\n')
def _from_numpy(x):
    # Convert a numpy array to an mx.array, forcing a C-contiguous copy first.
    return mx.array(np.ascontiguousarray(x))
# The `map_*_weights` functions are used to adjust the weights of a model when loading it from a file.
# The weights of the model in the file might be in a different format than the weights of the model in the current codebase.
# When you load a pre-trained model, the weights are stored in a dictionary where the keys are the names of the parameters in the model.
# If the architecture of your model is different from the architecture of the model that the weights were trained on, you might need to adjust the keys and/or the weights to match your model's architecture.
# This is what the `map_*_weights` functions are doing. They are adjusting the keys and the weights to match the architecture of the models in the current codebase.
def map_unet_weights(key, value):
    """Translate one diffusers-format UNet checkpoint entry into this
    codebase's parameter naming and layout.

    Args:
        key: parameter name as stored in the safetensors file.
        value: numpy weight tensor for that parameter.

    Returns:
        A list of (key, mx.array) pairs. Most entries map 1:1; the fused
        feed-forward projection ("ff.net.0.proj") is split into two entries.
    """
    # Map up/downsampling
    if "downsamplers" in key:
        key = key.replace("downsamplers.0.conv", "downsample")
        _debug_print(f"Replaced 'downsamplers.0.conv' with 'downsample' in {key}")
    if "upsamplers" in key:
        key = key.replace("upsamplers.0.conv", "upsample")
        _debug_print(f"Replaced 'upsamplers.0.conv' with 'upsample' in {key}")
    # Map the mid block
    if "mid_block.resnets.0" in key:
        key = key.replace("mid_block.resnets.0", "mid_blocks.0")
        _debug_print(f"Replaced 'mid_block.resnets.0' with 'mid_blocks.0' in {key}")
    if "mid_block.attentions.0" in key:
        key = key.replace("mid_block.attentions.0", "mid_blocks.1")
        _debug_print(f"Replaced 'mid_block.attentions.0' with 'mid_blocks.1' in {key}")
    if "mid_block.resnets.1" in key:
        key = key.replace("mid_block.resnets.1", "mid_blocks.2")
        _debug_print(f"Replaced 'mid_block.resnets.1' with 'mid_blocks.2' in {key}")
    # Map attention layers
    if "to_k" in key:
        key = key.replace("to_k", "key_proj")
        _debug_print(f"Replaced 'to_k' with 'key_proj' in {key}")
    if "to_out.0" in key:
        key = key.replace("to_out.0", "out_proj")
        _debug_print(f"Replaced 'to_out.0' with 'out_proj' in {key}")
    if "to_q" in key:
        key = key.replace("to_q", "query_proj")
        _debug_print(f"Replaced 'to_q' with 'query_proj' in {key}")
    if "to_v" in key:
        key = key.replace("to_v", "value_proj")
        _debug_print(f"Replaced 'to_v' with 'value_proj' in {key}")
    # Map transformer ffn
    if "ff.net.2" in key:
        key = key.replace("ff.net.2", "linear3")
        _debug_print(f"Replaced 'ff.net.2' with 'linear3' in {key}")
    # The fused GEGLU projection is stored as one tensor; split it in half
    # for the two separate linear layers used here (early return).
    if "ff.net.0" in key:
        k1 = key.replace("ff.net.0.proj", "linear1")
        k2 = key.replace("ff.net.0.proj", "linear2")
        v1, v2 = np.split(value, 2)
        _debug_print(f"Replaced 'ff.net.0.proj' with 'linear1' and 'linear2' in {key}")
        return [(k1, _from_numpy(v1)), (k2, _from_numpy(v2))]
    # The weights of this 1x1 convolutional layer would be a 4-dimensional tensor
    # with shape [out_channels, in_channels, 1, 1].
    # The squeeze() function is used to remove the dimensions of size 1 from this tensor,
    # converting it to a 2-dimensional tensor with shape [out_channels, in_channels].
    # This is because the corresponding layer in the current model might be a linear layer
    # rather than a convolutional layer, and the weights for a linear layer are expected to be a 2-dimensional tensor.
    if "conv_shortcut.weight" in key:
        value = value.squeeze()
        _debug_print(f"Squeezed 'conv_shortcut.weight' in {key}")
    # Transform the weights from 1x1 convs to linear
    if len(value.shape) == 4 and ("proj_in" in key or "proj_out" in key):
        value = value.squeeze()
        _debug_print(f"Squeezed 'proj_in' or 'proj_out' in {key}")
    # Remaining conv weights: reorder to channels-last [B, H, W, C].
    if len(value.shape) == 4:
        value = value.transpose(0, 2, 3, 1)
        _debug_print(f"Transposed dimensions in {key}")
    return [(key, _from_numpy(value))]
def map_clip_text_encoder_weights(key, value):
    """Translate one diffusers-format CLIP text-encoder checkpoint entry into
    this codebase's parameter naming.

    Args:
        key: parameter name as stored in the safetensors file.
        value: numpy weight tensor for that parameter.

    Returns:
        A single-element list of (key, mx.array); keys map 1:1 here.
    """
    # Remove prefixes
    if key.startswith("text_model."):
        key = key[11:]
        _debug_print(f"Removed 'text_model.' prefix from {key}")
    if key.startswith("embeddings."):
        key = key[11:]
        _debug_print(f"Removed 'embeddings.' prefix from {key}")
    if key.startswith("encoder."):
        key = key[8:]
        _debug_print(f"Removed 'encoder.' prefix from {key}")
    # Map attention layers
    if "self_attn." in key:
        key = key.replace("self_attn.", "attention.")
        _debug_print(f"Replaced 'self_attn.' with 'attention.' in {key}")
    if "q_proj." in key:
        key = key.replace("q_proj.", "query_proj.")
        _debug_print(f"Replaced 'q_proj.' with 'query_proj.' in {key}")
    if "k_proj." in key:
        key = key.replace("k_proj.", "key_proj.")
        _debug_print(f"Replaced 'k_proj.' with 'key_proj.' in {key}")
    if "v_proj." in key:
        key = key.replace("v_proj.", "value_proj.")
        _debug_print(f"Replaced 'v_proj.' with 'value_proj.' in {key}")
    # Map ffn layers
    if "mlp.fc1" in key:
        key = key.replace("mlp.fc1", "linear1")
        _debug_print(f"Replaced 'mlp.fc1' with 'linear1' in {key}")
    if "mlp.fc2" in key:
        key = key.replace("mlp.fc2", "linear2")
        _debug_print(f"Replaced 'mlp.fc2' with 'linear2' in {key}")
    return [(key, _from_numpy(value))]
def map_vae_weights(key, value):
    """Translate one diffusers-format VAE checkpoint entry into this
    codebase's parameter naming and channels-last layout.

    Args:
        key: parameter name as stored in the safetensors file.
        value: numpy weight tensor for that parameter.

    Returns:
        A single-element list of (key, mx.array).
    """
    # Map up/downsampling
    if "downsamplers" in key:
        key = key.replace("downsamplers.0.conv", "downsample")
        _debug_print(f"Replaced 'downsamplers.0.conv' with 'downsample' in {key}")
    if "upsamplers" in key:
        key = key.replace("upsamplers.0.conv", "upsample")
        _debug_print(f"Replaced 'upsamplers.0.conv' with 'upsample' in {key}")
    # Map attention layers
    if "to_k" in key:
        key = key.replace("to_k", "key_proj")
        _debug_print(f"Replaced 'to_k' with 'key_proj' in {key}")
    if "to_out.0" in key:
        key = key.replace("to_out.0", "out_proj")
        _debug_print(f"Replaced 'to_out.0' with 'out_proj' in {key}")
    if "to_q" in key:
        key = key.replace("to_q", "query_proj")
        _debug_print(f"Replaced 'to_q' with 'query_proj' in {key}")
    if "to_v" in key:
        key = key.replace("to_v", "value_proj")
        _debug_print(f"Replaced 'to_v' with 'value_proj' in {key}")
    # Map the mid block
    if "mid_block.resnets.0" in key:
        key = key.replace("mid_block.resnets.0", "mid_blocks.0")
        _debug_print(f"Replaced 'mid_block.resnets.0' with 'mid_blocks.0' in {key}")
    if "mid_block.attentions.0" in key:
        key = key.replace("mid_block.attentions.0", "mid_blocks.1")
        _debug_print(f"Replaced 'mid_block.attentions.0' with 'mid_blocks.1' in {key}")
    if "mid_block.resnets.1" in key:
        key = key.replace("mid_block.resnets.1", "mid_blocks.2")
        _debug_print(f"Replaced 'mid_block.resnets.1' with 'mid_blocks.2' in {key}")
    # Map the quant/post_quant layers
    if "quant_conv" in key:
        key = key.replace("quant_conv", "quant_proj")
        value = value.squeeze()
        _debug_print(f"Replaced 'quant_conv' with 'quant_proj' and squeezed value in {key}")
    # Map the conv_shortcut to linear
    if "conv_shortcut.weight" in key:
        value = value.squeeze()
        _debug_print(f"Squeezed 'conv_shortcut.weight' in {key}")
    # Rearrange the dimensions to [B, H, W, C] - Autoencoder expects: B, H, W, C = x.shape
    if len(value.shape) == 4:
        value = value.transpose(0, 2, 3, 1)
        _debug_print(f"Transposed dimensions in {key}")
    return [(key, _from_numpy(value))]
def _flatten(params):
return [(k, v) for p in params for (k, v) in p]
# The weights of the model can be loaded as 16-bit floating point numbers, which is a form of quantization known as half-precision floating point.
# This can reduce the memory requirements of the model by half compared to 32-bit floating point numbers, at the cost of reduced numerical precision.
def _load_safetensor_weights(mapper, model, weight_file, float16: bool = False):
    """Read a safetensors checkpoint, remap every tensor through ``mapper``,
    and update ``model`` in place.

    Loading as float16 halves the checkpoint's memory footprint at the cost
    of numerical precision.
    """
    dtype = np.float16 if float16 else np.float32
    _debug_print(f"Loading weights from {weight_file}")
    with safetensor_open(weight_file, framework="numpy") as f:
        mapped = [
            mapper(k, f.get_tensor(k).astype(dtype))
            for k in tqdm(list(f.keys()), desc=f"Loading weights from {weight_file}...")
        ]
        weights = _flatten(mapped)
    model.update(tree_unflatten(weights))
def _check_key(key: str, part: str):
    """Raise ValueError when ``key`` does not name a known model."""
    if key not in _MODELS:
        choices = ','.join(_MODELS.keys())
        raise ValueError(
            f"[{part}] '{key}' model not found, choose one of {{{choices}}}"
        )
def load_unet(key: str = _DEFAULT_MODEL, float16: bool = False):
    """Load the stable diffusion UNet from Hugging Face Hub.

    Args:
        key: Hub repo id; must be one of the known ``_MODELS`` entries.
        float16: if True, load the weights in half precision.

    Returns:
        A ``UNetModel`` populated with the pretrained weights.
    """
    _check_key(key, "load_unet")
    # Download the config and create the model
    unet_config = _MODELS[key]["unet_config"]
    with open(hf_hub_download(key, unet_config)) as f:
        config = json.load(f)
    n_blocks = len(config["block_out_channels"])
    model = UNetModel(
        UNetConfig(
            in_channels=config["in_channels"],
            out_channels=config["out_channels"],
            block_out_channels=config["block_out_channels"],
            layers_per_block=[config["layers_per_block"]] * n_blocks,
            # A scalar attention_head_dim applies to every block; a list is
            # taken as per-block values.
            num_attention_heads=[config["attention_head_dim"]] * n_blocks
            if isinstance(config["attention_head_dim"], int)
            else config["attention_head_dim"],
            cross_attention_dim=[config["cross_attention_dim"]] * n_blocks,
            norm_num_groups=config["norm_num_groups"],
        )
    )
    # Download the weights and map them into the model
    unet_weights = _MODELS[key]["unet"]
    weight_file = hf_hub_download(key, unet_weights)
    _load_safetensor_weights(map_unet_weights, model, weight_file, float16)
    return model
def load_text_encoder(key: str = _DEFAULT_MODEL, float16: bool = False):
    """Download and build the stable diffusion CLIP text encoder from the
    Hugging Face Hub, returning it with pretrained weights loaded."""
    _check_key(key, "load_text_encoder")
    # Build the model from its downloaded configuration file.
    config_path = hf_hub_download(key, _MODELS[key]["text_encoder_config"])
    with open(config_path) as f:
        config = json.load(f)
    model_config = CLIPTextModelConfig(
        num_layers=config["num_hidden_layers"],
        model_dims=config["hidden_size"],
        num_heads=config["num_attention_heads"],
        max_length=config["max_position_embeddings"],
        vocab_size=config["vocab_size"],
    )
    model = CLIPTextModel(model_config)
    # Fetch the checkpoint and remap its tensors into the model.
    weight_file = hf_hub_download(key, _MODELS[key]["text_encoder"])
    _load_safetensor_weights(map_clip_text_encoder_weights, model, weight_file, float16)
    return model
def load_autoencoder(key: str = _DEFAULT_MODEL, float16: bool = False):
"""Load the stable diffusion autoencoder from Hugging Face Hub."""
_check_key(key, "load_autoencoder")
# Download the config and create the model
vae_config = _MODELS[key]["vae_config"]
with open(hf_hub_download(key, vae_config)) as f:
config = json.load(f)
| model = Autoencoder( | 7 | 2023-12-25 05:49:34+00:00 | 12k |
Con6924/SPM | train_spm_xl.py | [
{
"identifier": "SPMNetwork",
"path": "src/models/spm.py",
"snippet": "class SPMNetwork(nn.Module):\n UNET_TARGET_REPLACE_MODULE_TRANSFORMER = [\n \"Transformer2DModel\",\n ]\n UNET_TARGET_REPLACE_MODULE_CONV = [\n \"ResnetBlock2D\",\n \"Downsample2D\",\n \"Upsample2... | import argparse
import gc
import torch
import src.engine.train_util as train_util
import wandb
from pathlib import Path
from tqdm import tqdm
from src.models.spm import (
SPMNetwork,
SPMLayer,
)
from src.engine.sampling import sample_xl
from src.models import model_util
from src.evaluation import eval_util
from src.configs import config as config_pkg
from src.configs import prompt as prompt_pkg
from src.configs.config import RootConfig
from src.configs.prompt import PromptEmbedsCache, PromptEmbedsPair, PromptSettings, PromptEmbedsXL | 7,368 | positive_latents.requires_grad = False
neutral_latents.requires_grad = False
loss = prompt_pair.loss(
target_latents=target_latents,
positive_latents=positive_latents,
neutral_latents=neutral_latents,
anchor_latents=anchor_latents,
anchor_latents_ori=anchor_latents_ori,
)
loss["loss"].backward()
if config.train.max_grad_norm > 0:
torch.nn.utils.clip_grad_norm_(
trainable_params, config.train.max_grad_norm, norm_type=2
)
optimizer.step()
lr_scheduler.step()
pbar.set_description(f"Loss*1k: {loss['loss'].item()*1000:.4f}")
# logging
if config.logging.use_wandb:
log_dict = {"iteration": i}
loss = {k: v.detach().cpu().item() for k, v in loss.items()}
log_dict.update(loss)
lrs = lr_scheduler.get_last_lr()
if len(lrs) == 1:
log_dict["lr"] = float(lrs[0])
else:
log_dict["lr/textencoder"] = float(lrs[0])
log_dict["lr/unet"] = float(lrs[-1])
if config.train.optimizer_type.lower().startswith("dadapt"):
log_dict["lr/d*lr"] = (
optimizer.param_groups[0]["d"] * optimizer.param_groups[0]["lr"]
)
# generate sample images
if config.logging.interval > 0 and (
i % config.logging.interval == 0 or i == config.train.iterations - 1
):
print("Generating samples...")
with network:
samples = train_util.text2img(
pipe,
prompts=config.logging.prompts,
negative_prompt=config.logging.negative_prompt,
width=config.logging.width,
height=config.logging.height,
num_inference_steps=config.logging.num_inference_steps,
guidance_scale=config.logging.guidance_scale,
generate_num=config.logging.generate_num,
seed=config.logging.seed,
)
for text, img in samples:
log_dict[text] = wandb.Image(img)
# evaluate on the generated images
print("Evaluating CLIPScore and CLIPAccuracy...")
with network:
clip_scores, clip_accs = eval_util.clip_eval(pipe, config)
for prompt, clip_score, clip_accuracy in zip(
config.logging.prompts, clip_scores, clip_accs
):
log_dict[f"CLIPScore/{prompt}"] = clip_score
log_dict[f"CLIPAccuracy/{prompt}"] = clip_accuracy
log_dict[f"CLIPScore/average"] = sum(clip_scores) / len(clip_scores)
log_dict[f"CLIPAccuracy/average"] = sum(clip_accs) / len(clip_accs)
wandb.log(log_dict)
# save model
if (
i % config.save.per_steps == 0
and i != 0
and i != config.train.iterations - 1
):
print("Saving...")
save_path.mkdir(parents=True, exist_ok=True)
network.save_weights(
save_path / f"{config.save.name}_{i}steps.safetensors",
dtype=save_weight_dtype,
metadata=model_metadata,
)
del (
positive_latents,
neutral_latents,
target_latents,
latents,
anchor_latents,
anchor_latents_ori,
)
flush()
print("Saving...")
save_path.mkdir(parents=True, exist_ok=True)
network.save_weights(
save_path / f"{config.save.name}_last.safetensors",
dtype=save_weight_dtype,
metadata=model_metadata,
)
del (
unet,
noise_scheduler,
loss,
optimizer,
network,
)
flush()
print("Done.")
def main(args):
config_file = args.config_file
config = config_pkg.load_config_from_yaml(config_file)
| # ref:
# - https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py#L566
# - https://huggingface.co/spaces/baulab/Erasing-Concepts-In-Diffusion/blob/main/train.py
# - https://github.com/p1atdev/LECO/blob/main/train_lora_xl.py
DEVICE_CUDA = torch.device("cuda:0")
NUM_IMAGES_PER_PROMPT = 1
def flush():
    # Release cached GPU memory and run Python garbage collection; called
    # between training phases to keep peak memory low.
    torch.cuda.empty_cache()
    gc.collect()
def train(
    config: RootConfig,
    prompts: list[PromptSettings],
):
    """Train an SPM adapter on SDXL to erase/edit the given target concepts.

    Loads the frozen SDXL components, wraps the UNet with an SPMNetwork,
    optimizes the adapter with the ESD-style objective (optionally with
    latent anchoring), logs to wandb, and periodically saves safetensors
    checkpoints under ``config.save.path``.

    Args:
        config: full training configuration (model, optimizer, logging, save).
        prompts: prompt settings; each yields a target/positive/neutral/
            unconditional embedding tuple used to build the training pairs.
    """
    metadata = {
        "prompts": ",".join([prompt.json() for prompt in prompts]),
        "config": config.json(),
    }
    model_metadata = {
        "prompts": ",".join([prompt.target for prompt in prompts]),
        "rank": str(config.network.rank),
        "alpha": str(config.network.alpha),
    }
    save_path = Path(config.save.path)
    if config.logging.verbose:
        print(metadata)
    weight_dtype = config_pkg.parse_precision(config.train.precision)
    save_weight_dtype = config_pkg.parse_precision(config.train.precision)
    if config.logging.use_wandb:
        wandb.init(project="SPM",
                   config=metadata,
                   name=config.logging.run_name,
                   settings=wandb.Settings(symlink=False))
    # Load frozen SDXL components; only the SPM adapter below is trained.
    (
        tokenizers,
        text_encoders,
        unet,
        noise_scheduler,
        pipe
    ) = model_util.load_models_xl(
        config.pretrained_model.name_or_path,
        scheduler_name=config.train.noise_scheduler,
    )
    for text_encoder in text_encoders:
        text_encoder.to(DEVICE_CUDA, dtype=weight_dtype)
        text_encoder.requires_grad_(False)
        text_encoder.eval()
    unet.to(DEVICE_CUDA, dtype=weight_dtype)
    unet.enable_xformers_memory_efficient_attention()
    unet.requires_grad_(False)
    unet.eval()
    network = SPMNetwork(
        unet,
        rank=config.network.rank,
        multiplier=1.0,
        alpha=config.network.alpha,
        module=SPMLayer,
    ).to(DEVICE_CUDA, dtype=weight_dtype)
    trainable_params = network.prepare_optimizer_params(
        config.train.text_encoder_lr, config.train.unet_lr, config.train.lr
    )
    optimizer_name, optimizer_args, optimizer = train_util.get_optimizer(
        config, trainable_params
    )
    lr_scheduler = train_util.get_scheduler_fix(config, optimizer)
    criteria = torch.nn.MSELoss()
    print("Prompts")
    for settings in prompts:
        print(settings)
    # Pre-compute and cache text embeddings; encoding is done once per
    # distinct prompt string.
    cache = PromptEmbedsCache()
    prompt_pairs: list[PromptEmbedsPair] = []
    with torch.no_grad():
        for settings in prompts:
            for prompt in [
                settings.target,
                settings.positive,
                settings.neutral,
                settings.unconditional,
            ]:
                if cache[prompt] is None:
                    cache[prompt] = PromptEmbedsXL(
                        train_util.encode_prompts_xl(
                            tokenizers,
                            text_encoders,
                            [prompt],
                            num_images_per_prompt=NUM_IMAGES_PER_PROMPT,
                        )
                    )
            prompt_pair = PromptEmbedsPair(
                criteria,
                cache[settings.target],
                cache[settings.positive],
                cache[settings.unconditional],
                cache[settings.neutral],
                settings,
            )
            assert prompt_pair.sampling_batch_size % prompt_pair.batch_size == 0
            prompt_pairs.append(prompt_pair)
    flush()
    pbar = tqdm(range(config.train.iterations))
    loss = None
    for i in pbar:
        with torch.no_grad():
            noise_scheduler.set_timesteps(
                config.train.max_denoising_steps, device=DEVICE_CUDA
            )
            optimizer.zero_grad()
            # Sample a random prompt pair and a random partial-denoising depth.
            prompt_pair: PromptEmbedsPair = prompt_pairs[
                torch.randint(0, len(prompt_pairs), (1,)).item()
            ]
            timesteps_to = torch.randint(
                1, config.train.max_denoising_steps, (1,)
            ).item()
            height, width = (
                prompt_pair.resolution,
                prompt_pair.resolution,
            )
            if prompt_pair.dynamic_resolution:
                height, width = train_util.get_random_resolution_in_bucket(
                    prompt_pair.resolution
                )
            if config.logging.verbose:
                print("guidance_scale:", prompt_pair.guidance_scale)
                print("resolution:", prompt_pair.resolution)
                print("dynamic_resolution:", prompt_pair.dynamic_resolution)
                if prompt_pair.dynamic_resolution:
                    print("bucketed resolution:", (height, width))
                print("batch_size:", prompt_pair.batch_size)
                print("dynamic_crops:", prompt_pair.dynamic_crops)
            latents = train_util.get_initial_latents(
                noise_scheduler, prompt_pair.batch_size, height, width, 1
            ).to(DEVICE_CUDA, dtype=weight_dtype)
            add_time_ids = train_util.get_add_time_ids(
                height,
                width,
                dynamic_crops=prompt_pair.dynamic_crops,
                dtype=weight_dtype,
            ).to(DEVICE_CUDA, dtype=weight_dtype)
            # Partially denoise with the adapter active to reach the training
            # timestep, then score noise predictions at that point.
            with network:
                denoised_latents = train_util.diffusion_xl(
                    unet,
                    noise_scheduler,
                    latents,
                    text_embeddings=train_util.concat_embeddings(
                        prompt_pair.unconditional.text_embeds,
                        prompt_pair.target.text_embeds,
                        prompt_pair.batch_size,
                    ),
                    add_text_embeddings=train_util.concat_embeddings(
                        prompt_pair.unconditional.pooled_embeds,
                        prompt_pair.target.pooled_embeds,
                        prompt_pair.batch_size,
                    ),
                    add_time_ids=train_util.concat_embeddings(
                        add_time_ids, add_time_ids, prompt_pair.batch_size
                    ),
                    start_timesteps=0,
                    total_timesteps=timesteps_to,
                    guidance_scale=3,
                )
            noise_scheduler.set_timesteps(1000)
            current_timestep = noise_scheduler.timesteps[
                int(timesteps_to * 1000 / config.train.max_denoising_steps)
            ]
            # Reference predictions from the frozen model (no adapter).
            positive_latents = train_util.predict_noise_xl(
                unet,
                noise_scheduler,
                current_timestep,
                denoised_latents,
                text_embeddings=train_util.concat_embeddings(
                    prompt_pair.unconditional.text_embeds,
                    prompt_pair.positive.text_embeds,
                    prompt_pair.batch_size,
                ),
                add_text_embeddings=train_util.concat_embeddings(
                    prompt_pair.unconditional.pooled_embeds,
                    prompt_pair.positive.pooled_embeds,
                    prompt_pair.batch_size,
                ),
                add_time_ids=train_util.concat_embeddings(
                    add_time_ids, add_time_ids, prompt_pair.batch_size
                ),
                guidance_scale=1,
            ).to("cpu", dtype=torch.float32)
            neutral_latents = train_util.predict_noise_xl(
                unet,
                noise_scheduler,
                current_timestep,
                denoised_latents,
                text_embeddings=train_util.concat_embeddings(
                    prompt_pair.unconditional.text_embeds,
                    prompt_pair.neutral.text_embeds,
                    prompt_pair.batch_size,
                ),
                add_text_embeddings=train_util.concat_embeddings(
                    prompt_pair.unconditional.pooled_embeds,
                    prompt_pair.neutral.pooled_embeds,
                    prompt_pair.batch_size,
                ),
                add_time_ids=train_util.concat_embeddings(
                    add_time_ids, add_time_ids, prompt_pair.batch_size
                ),
                guidance_scale=1,
            ).to("cpu", dtype=torch.float32)
        # Trainable prediction: adapter active, gradients flow here.
        with network:
            target_latents = train_util.predict_noise_xl(
                unet,
                noise_scheduler,
                current_timestep,
                denoised_latents,
                text_embeddings=train_util.concat_embeddings(
                    prompt_pair.unconditional.text_embeds,
                    prompt_pair.target.text_embeds,
                    prompt_pair.batch_size,
                ),
                add_text_embeddings=train_util.concat_embeddings(
                    prompt_pair.unconditional.pooled_embeds,
                    prompt_pair.target.pooled_embeds,
                    prompt_pair.batch_size,
                ),
                add_time_ids=train_util.concat_embeddings(
                    add_time_ids, add_time_ids, prompt_pair.batch_size
                ),
                guidance_scale=1,
            ).to("cpu", dtype=torch.float32)
        # ------------------------- latent anchoring part -----------------------------
        if prompt_pair.action == "erase_with_la":
            # noise sampling
            anchors_text, anchors_pool = sample_xl(prompt_pair, tokenizers=tokenizers, text_encoders=text_encoders)
            # get latents
            repeat = prompt_pair.sampling_batch_size // prompt_pair.batch_size
            # TODO: target or positive?
            with network:
                anchor_latents = train_util.predict_noise_xl(
                    unet,
                    noise_scheduler,
                    current_timestep,
                    denoised_latents.repeat(repeat, 1, 1, 1),
                    text_embeddings=anchors_text,
                    add_text_embeddings=anchors_pool,
                    add_time_ids=train_util.concat_embeddings(
                        add_time_ids, add_time_ids, prompt_pair.sampling_batch_size
                    ),
                    guidance_scale=1,
                ).to("cpu", dtype=torch.float32)
            with torch.no_grad():
                anchor_latents_ori = train_util.predict_noise_xl(
                    unet,
                    noise_scheduler,
                    current_timestep,
                    denoised_latents.repeat(repeat, 1, 1, 1),
                    text_embeddings=anchors_text,
                    add_text_embeddings=anchors_pool,
                    add_time_ids=train_util.concat_embeddings(
                        add_time_ids, add_time_ids, prompt_pair.sampling_batch_size
                    ),
                    guidance_scale=1,
                ).to("cpu", dtype=torch.float32)
            # Bug fix: the original wrote ``anchor_latents_ori.requires_grad_ = False``,
            # which assigns False over the bound method instead of disabling grad.
            anchor_latents_ori.requires_grad = False
        else:
            anchor_latents = None
            anchor_latents_ori = None
        # ----------------------------------------------------------------
        positive_latents.requires_grad = False
        neutral_latents.requires_grad = False
        loss = prompt_pair.loss(
            target_latents=target_latents,
            positive_latents=positive_latents,
            neutral_latents=neutral_latents,
            anchor_latents=anchor_latents,
            anchor_latents_ori=anchor_latents_ori,
        )
        loss["loss"].backward()
        if config.train.max_grad_norm > 0:
            torch.nn.utils.clip_grad_norm_(
                trainable_params, config.train.max_grad_norm, norm_type=2
            )
        optimizer.step()
        lr_scheduler.step()
        pbar.set_description(f"Loss*1k: {loss['loss'].item()*1000:.4f}")
        # logging
        if config.logging.use_wandb:
            log_dict = {"iteration": i}
            loss = {k: v.detach().cpu().item() for k, v in loss.items()}
            log_dict.update(loss)
            lrs = lr_scheduler.get_last_lr()
            if len(lrs) == 1:
                log_dict["lr"] = float(lrs[0])
            else:
                log_dict["lr/textencoder"] = float(lrs[0])
                log_dict["lr/unet"] = float(lrs[-1])
            if config.train.optimizer_type.lower().startswith("dadapt"):
                log_dict["lr/d*lr"] = (
                    optimizer.param_groups[0]["d"] * optimizer.param_groups[0]["lr"]
                )
            # generate sample images
            if config.logging.interval > 0 and (
                i % config.logging.interval == 0 or i == config.train.iterations - 1
            ):
                print("Generating samples...")
                with network:
                    samples = train_util.text2img(
                        pipe,
                        prompts=config.logging.prompts,
                        negative_prompt=config.logging.negative_prompt,
                        width=config.logging.width,
                        height=config.logging.height,
                        num_inference_steps=config.logging.num_inference_steps,
                        guidance_scale=config.logging.guidance_scale,
                        generate_num=config.logging.generate_num,
                        seed=config.logging.seed,
                    )
                for text, img in samples:
                    log_dict[text] = wandb.Image(img)
                # evaluate on the generated images
                print("Evaluating CLIPScore and CLIPAccuracy...")
                with network:
                    clip_scores, clip_accs = eval_util.clip_eval(pipe, config)
                for prompt, clip_score, clip_accuracy in zip(
                    config.logging.prompts, clip_scores, clip_accs
                ):
                    log_dict[f"CLIPScore/{prompt}"] = clip_score
                    log_dict[f"CLIPAccuracy/{prompt}"] = clip_accuracy
                log_dict["CLIPScore/average"] = sum(clip_scores) / len(clip_scores)
                log_dict["CLIPAccuracy/average"] = sum(clip_accs) / len(clip_accs)
            wandb.log(log_dict)
        # save model
        if (
            i % config.save.per_steps == 0
            and i != 0
            and i != config.train.iterations - 1
        ):
            print("Saving...")
            save_path.mkdir(parents=True, exist_ok=True)
            network.save_weights(
                save_path / f"{config.save.name}_{i}steps.safetensors",
                dtype=save_weight_dtype,
                metadata=model_metadata,
            )
        # Drop per-iteration tensors before the next pass to cap GPU memory.
        del (
            positive_latents,
            neutral_latents,
            target_latents,
            latents,
            anchor_latents,
            anchor_latents_ori,
        )
        flush()
    print("Saving...")
    save_path.mkdir(parents=True, exist_ok=True)
    network.save_weights(
        save_path / f"{config.save.name}_last.safetensors",
        dtype=save_weight_dtype,
        metadata=model_metadata,
    )
    del (
        unet,
        noise_scheduler,
        loss,
        optimizer,
        network,
    )
    flush()
    print("Done.")
def main(args):
config_file = args.config_file
config = config_pkg.load_config_from_yaml(config_file) | prompts = prompt_pkg.load_prompts_from_yaml(config.prompts_file) | 4 | 2023-12-26 03:19:16+00:00 | 12k |
dakpinaroglu/Frame2seq | frame2seq/openfold/utils/feats.py | [
{
"identifier": "protein",
"path": "frame2seq/openfold/np/protein.py",
"snippet": "PICO_TO_ANGSTROM = 0.01\nclass Protein:\ndef from_pdb_string(pdb_str: str, chain_id: Optional[str] = None) -> Protein:\ndef from_proteinnet_string(proteinnet_str: str) -> Protein:\ndef get_pdb_headers(prot: Protein, chain... | import math
import numpy as np
import torch
import torch.nn as nn
import frame2seq.openfold.np.residue_constants as rc
from typing import Dict
from frame2seq.openfold.np import protein
from frame2seq.openfold.utils.rigid_utils import Rotation, Rigid
from frame2seq.openfold.utils.tensor_utils import (
batched_gather,
one_hot,
tree_map,
tensor_tree_map,
) | 10,598 | # Copyright 2021 AlQuraishi Laboratory
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def pseudo_beta_fn(aatype, all_atom_positions, all_atom_masks):
    """Compute pseudo-beta atom positions (and, optionally, their mask).

    The pseudo-beta is the CB coordinate for every residue except glycine,
    which has no CB atom and falls back to CA.

    Args:
        aatype: [..., N_res] integer residue types (``rc.restype_order`` codes).
        all_atom_positions: atom coordinates whose second-to-last axis is
            indexed by ``rc.atom_order`` and whose last axis is xyz
            (assumes the atom37 convention — TODO confirm against callers).
        all_atom_masks: matching atom existence mask, or ``None``.

    Returns:
        ``pseudo_beta``, plus ``pseudo_beta_mask`` when ``all_atom_masks``
        is given.
    """
    is_gly = aatype == rc.restype_order["G"]
    ca_idx = rc.atom_order["CA"]
    cb_idx = rc.atom_order["CB"]
    # Broadcast the per-residue glycine flag over the xyz axis, then pick
    # CA where it is set and CB everywhere else.
    pseudo_beta = torch.where(
        is_gly[..., None].expand(*((-1,) * len(is_gly.shape)), 3),
        all_atom_positions[..., ca_idx, :],
        all_atom_positions[..., cb_idx, :],
    )
    if all_atom_masks is not None:
        # The mask undergoes the same CA-for-glycine substitution.
        pseudo_beta_mask = torch.where(
            is_gly,
            all_atom_masks[..., ca_idx],
            all_atom_masks[..., cb_idx],
        )
        return pseudo_beta, pseudo_beta_mask
    else:
        return pseudo_beta
def atom14_to_atom37(atom14, batch):
    """Re-index atom14 per-residue data into the atom37 representation.

    Args:
        atom14: [..., N_res, 14, ...] atom14-ordered data.
        batch: must provide ``"residx_atom37_to_atom14"`` (per-residue gather
            indices along the atom axis) and ``"atom37_atom_exists"`` (mask
            that zeroes atom37 slots with no physical atom for the residue).

    Returns:
        The input data gathered into atom37 order, with nonexistent atom
        slots zeroed.
    """
    atom37_data = batched_gather(
        atom14,
        batch["residx_atom37_to_atom14"],
        dim=-2,
        no_batch_dims=len(atom14.shape[:-2]),
    )
    # Zero out slots that do not correspond to a real atom of this residue.
    atom37_data = atom37_data * batch["atom37_atom_exists"][..., None]
    return atom37_data
def build_template_angle_feat(template_feats):
    """Concatenate template torsion-angle features along the last axis.

    Last-dim layout: 22 one-hot residue classes, 14 torsion sin/cos values
    (the trailing (angles, 2) axes flattened), 14 alternative torsion
    sin/cos values, then the torsion-angle mask (presumably 7 wide, one per
    angle — matches the 7x2=14 flattening; confirm upstream).
    """
    template_aatype = template_feats["template_aatype"]
    torsion_angles_sin_cos = template_feats["template_torsion_angles_sin_cos"]
    alt_torsion_angles_sin_cos = template_feats[
        "template_alt_torsion_angles_sin_cos"
    ]
    torsion_angles_mask = template_feats["template_torsion_angles_mask"]
    template_angle_feat = torch.cat(
        [
            nn.functional.one_hot(template_aatype, 22),
            # Fold the trailing (n_angles, sin/cos) axes into one feature axis.
            torsion_angles_sin_cos.reshape(
                *torsion_angles_sin_cos.shape[:-2], 14
            ),
            alt_torsion_angles_sin_cos.reshape(
                *alt_torsion_angles_sin_cos.shape[:-2], 14
            ),
            torsion_angles_mask,
        ],
        dim=-1,
    )
    return template_angle_feat
def build_template_pair_feat(
batch,
min_bin, max_bin, no_bins,
use_unit_vector=False,
eps=1e-20, inf=1e8
):
template_mask = batch["template_pseudo_beta_mask"]
template_mask_2d = template_mask[..., None] * template_mask[..., None, :]
# Compute distogram (this seems to differ slightly from Alg. 5)
tpb = batch["template_pseudo_beta"]
dgram = torch.sum(
(tpb[..., None, :] - tpb[..., None, :, :]) ** 2, dim=-1, keepdim=True
)
lower = torch.linspace(min_bin, max_bin, no_bins, device=tpb.device) ** 2
upper = torch.cat([lower[1:], lower.new_tensor([inf])], dim=-1)
dgram = ((dgram > lower) * (dgram < upper)).type(dgram.dtype)
to_concat = [dgram, template_mask_2d[..., None]]
aatype_one_hot = nn.functional.one_hot(
batch["template_aatype"],
rc.restype_num + 2,
)
n_res = batch["template_aatype"].shape[-1]
to_concat.append(
aatype_one_hot[..., None, :, :].expand(
*aatype_one_hot.shape[:-2], n_res, -1, -1
)
)
to_concat.append(
aatype_one_hot[..., None, :].expand(
*aatype_one_hot.shape[:-2], -1, n_res, -1
)
)
n, ca, c = [rc.atom_order[a] for a in ["N", "CA", "C"]]
| # Copyright 2021 AlQuraishi Laboratory
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def pseudo_beta_fn(aatype, all_atom_positions, all_atom_masks):
    """Return pseudo-beta coordinates: CB per residue, CA for glycine.

    When ``all_atom_masks`` is provided, the matching pseudo-beta existence
    mask is returned as well.
    """
    gly_mask = aatype == rc.restype_order["G"]
    ca = rc.atom_order["CA"]
    cb = rc.atom_order["CB"]

    # Broadcast the per-residue glycine flag over the xyz axis, then select
    # CA coordinates where it is set and CB coordinates everywhere else.
    gly_mask_xyz = gly_mask[..., None].expand(*((-1,) * len(gly_mask.shape)), 3)
    pseudo_beta = torch.where(
        gly_mask_xyz,
        all_atom_positions[..., ca, :],
        all_atom_positions[..., cb, :],
    )

    if all_atom_masks is None:
        return pseudo_beta

    # The mask undergoes the same CA-for-glycine substitution.
    pseudo_beta_mask = torch.where(
        gly_mask,
        all_atom_masks[..., ca],
        all_atom_masks[..., cb],
    )
    return pseudo_beta, pseudo_beta_mask
def atom14_to_atom37(atom14, batch):
    """Re-index atom14 data into the atom37 layout, masking absent atoms."""
    batch_dim_count = len(atom14.shape[:-2])
    gathered = batched_gather(
        atom14,
        batch["residx_atom37_to_atom14"],
        dim=-2,
        no_batch_dims=batch_dim_count,
    )
    # Slots with no physical atom for the residue are zeroed out.
    return gathered * batch["atom37_atom_exists"][..., None]
def build_template_angle_feat(template_feats):
    """Assemble per-residue template angle features into a single tensor.

    Concatenates, along the last axis: a 22-way one-hot of the template
    residue type, the flattened torsion sin/cos values, the flattened
    alternative torsion sin/cos values, and the torsion-angle mask.
    """
    aatype = template_feats["template_aatype"]
    angles = template_feats["template_torsion_angles_sin_cos"]
    alt_angles = template_feats["template_alt_torsion_angles_sin_cos"]
    angle_mask = template_feats["template_torsion_angles_mask"]

    # Fold the trailing (n_angles, sin/cos) axes into one feature axis.
    flat_angles = angles.reshape(*angles.shape[:-2], 14)
    flat_alt_angles = alt_angles.reshape(*alt_angles.shape[:-2], 14)

    parts = [
        nn.functional.one_hot(aatype, 22),
        flat_angles,
        flat_alt_angles,
        angle_mask,
    ]
    return torch.cat(parts, dim=-1)
def build_template_pair_feat(
batch,
min_bin, max_bin, no_bins,
use_unit_vector=False,
eps=1e-20, inf=1e8
):
template_mask = batch["template_pseudo_beta_mask"]
template_mask_2d = template_mask[..., None] * template_mask[..., None, :]
# Compute distogram (this seems to differ slightly from Alg. 5)
tpb = batch["template_pseudo_beta"]
dgram = torch.sum(
(tpb[..., None, :] - tpb[..., None, :, :]) ** 2, dim=-1, keepdim=True
)
lower = torch.linspace(min_bin, max_bin, no_bins, device=tpb.device) ** 2
upper = torch.cat([lower[1:], lower.new_tensor([inf])], dim=-1)
dgram = ((dgram > lower) * (dgram < upper)).type(dgram.dtype)
to_concat = [dgram, template_mask_2d[..., None]]
aatype_one_hot = nn.functional.one_hot(
batch["template_aatype"],
rc.restype_num + 2,
)
n_res = batch["template_aatype"].shape[-1]
to_concat.append(
aatype_one_hot[..., None, :, :].expand(
*aatype_one_hot.shape[:-2], n_res, -1, -1
)
)
to_concat.append(
aatype_one_hot[..., None, :].expand(
*aatype_one_hot.shape[:-2], -1, n_res, -1
)
)
n, ca, c = [rc.atom_order[a] for a in ["N", "CA", "C"]] | rigids = Rigid.make_transform_from_reference( | 2 | 2023-12-25 09:29:36+00:00 | 12k |
wwxu21/CUT | finetune_unlikelihood.py | [
{
"identifier": "LlamaForCausalLM",
"path": "modeling_llama_unlikelihood.py",
"snippet": "class LlamaForCausalLM(LlamaPreTrainedModel):\n _tied_weights_keys = [\"lm_head.weight\"]\n\n def __init__(self, config, threshold):\n super().__init__(config)\n self.model = LlamaModel(config)\... | import os
import sys
import json
import fire
import torch
import transformers
import numpy as np
import random
from typing import List
from torch.utils.data import DataLoader
from datasets import load_dataset, concatenate_datasets, Dataset
from transformers import TrainerCallback, TrainingArguments, TrainerState, TrainerControl
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
from peft import (
LoraConfig,
prepare_model_for_int8_training,
set_peft_model_state_dict,
MODEL_TYPE_TO_PEFT_MODEL_MAPPING,
PeftModel,
)
from peft.utils import _prepare_prompt_learning_config
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.utils import PaddingStrategy
from transformers import LlamaTokenizer, LlamaConfig
from modeling_llama_unlikelihood import LlamaForCausalLM, PeftModelForCausalLM
from prompter import Prompter
from typing import Optional, Union, Any
from dataclasses import dataclass | 7,333 | args: TrainingArguments,
state: TrainerState,
control: TrainerControl,
**kwargs,
):
checkpoint_folder = os.path.join(args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}")
kwargs["model"].save_pretrained(checkpoint_folder)
pytorch_model_path = os.path.join(checkpoint_folder, "pytorch_model.bin")
torch.save({}, pytorch_model_path)
return control
class LoadBestPeftModelCallback(TrainerCallback):
    """Callback that reloads the best PEFT adapter weights when training ends."""

    def on_train_end(
        self,
        args: TrainingArguments,
        state: TrainerState,
        control: TrainerControl,
        **kwargs,
    ):
        # NOTE(review): relies on state.best_model_checkpoint being populated
        # (load_best_model_at_end-style tracking) — confirm trainer config.
        print(f"Loading best peft model from {state.best_model_checkpoint} (score: {state.best_metric}).")
        best_model_path = os.path.join(state.best_model_checkpoint, "adapter_model.bin")
        adapters_weights = torch.load(best_model_path)
        model = kwargs["model"]
        # Overwrite the live model's adapter weights in place.
        set_peft_model_state_dict(model, adapters_weights)
        return control
def get_peft_model(model, peft_config, adapter_name: str = "default"):
    """
    Returns a Peft model object from a model and a config.

    Unlike ``peft.get_peft_model``, supported task types are wrapped in the
    project's ``PeftModelForCausalLM`` (imported from
    ``modeling_llama_unlikelihood``).
    Args:
        model ([`transformers.PreTrainedModel`]): Model to be wrapped.
        peft_config ([`PeftConfig`]): Configuration object containing the parameters of the Peft model.
        adapter_name (`str`): Name under which the adapter is registered.
    """
    model_config = getattr(model, "config", {"model_type": "custom"})
    if hasattr(model_config, "to_dict"):
        model_config = model_config.to_dict()
    peft_config.base_model_name_or_path = model.__dict__.get("name_or_path", None)
    # Task types with no dedicated PEFT class fall back to the generic wrapper.
    if peft_config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys() and not peft_config.is_prompt_learning:
        return PeftModel(model, peft_config, adapter_name=adapter_name)
    if peft_config.is_prompt_learning:
        # Prompt-learning methods derive extra config fields from the model config.
        peft_config = _prepare_prompt_learning_config(peft_config, model_config)
    return PeftModelForCausalLM(model, peft_config, adapter_name=adapter_name)
def train(
# model/data params
base_model: str = "",
data_path: str = "",
output_dir: str = "",
# training hyperparams
batch_size: int = 128,
micro_batch_size: int = 8,
num_epochs: int = 1,
learning_rate: float = 3e-4,
cutoff_len: int = 4096,
val_set_size: int = 0,
lr_scheduler: str = "cosine",
warmup_steps: int = 100,
# lora hyperparams
lora_r: int = 16,
lora_alpha: int = 16,
lora_dropout: float = 0.05,
# from peft docs: ["q_proj", "k_proj", "v_proj", "o_proj", "fc_in", "fc_out", "wte", "gate_proj", "down_proj", "up_proj"]
lora_target_modules: List[str] = ["gate_proj", "down_proj", "up_proj"],
# llm hyperparams
train_on_inputs: bool = False, # if False, masks out inputs in loss
add_eos_token: bool = False,
group_by_length: bool = False, # faster, but produces an odd training loss curve
# wandb params
wandb_project: str = "",
wandb_run_name: str = "",
wandb_watch: str = "", # options: false | gradients | all
wandb_log_model: str = "", # options: false | true
resume_from_checkpoint: str = None, # either training checkpoint or final adapter
prompt_template_name: str = "alpaca",
weight_unlike: float = 0.1,
threshold: float = 1.1,
downsample: float = -1,
debug: bool = False,
):
if int(os.environ.get("LOCAL_RANK", 0)) == 0:
print(
f"Params using prompt template {prompt_template_name}\n"
f"the unlikelihood weight for the incorrect token in the incorrect response: {weight_unlike}\n"
f"the threshold to determine the unlikelihood token: {threshold}\n"
f"downssample rate for Hindsight-P: {downsample}\n"
f"base_model: {base_model}\n"
f"data_path: {data_path}\n"
f"output_dir: {output_dir}\n"
f"batch_size: {batch_size}\n"
f"micro_batch_size: {micro_batch_size}\n"
f"num_epochs: {num_epochs}\n"
f"learning_rate: {learning_rate}\n"
f"cutoff_len: {cutoff_len}\n"
f"val_set_size: {val_set_size}\n"
f"lr_scheduler: {lr_scheduler}\n"
f"warmup_steps: {warmup_steps}\n"
f"lora_r: {lora_r}\n"
f"lora_alpha: {lora_alpha}\n"
f"lora_dropout: {lora_dropout}\n"
f"lora_target_modules: {lora_target_modules}\n"
f"train_on_inputs: {train_on_inputs}\n"
f"add_eos_token: {add_eos_token}\n"
f"group_by_length: {group_by_length}\n"
f"wandb_project: {wandb_project}\n"
f"wandb_run_name: {wandb_run_name}\n"
f"wandb_watch: {wandb_watch}\n"
f"wandb_log_model: {wandb_log_model}\n"
f"resume_from_checkpoint: {resume_from_checkpoint or False}\n"
)
assert (
base_model
), "Please specify a --base_model, e.g. --base_model='huggyllama/llama-7b'"
gradient_accumulation_steps = batch_size // micro_batch_size
# Fix every RNG (Python, NumPy, torch CPU, and all CUDA devices) so runs are
# reproducible.
seed = 42
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
@dataclass
class MyDataCollator:
    """
    Data collator that dynamically pads the positive inputs/labels as well as
    the negative-sample fields ("labels_neg", "input_ids_neg",
    "attention_mask_neg") used for unlikelihood training, so positive and
    negative sequences in a batch share one common length.

    Args:
        tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):
            The tokenizer used for encoding the data.
        model ([`PreTrainedModel`]):
            The model that is being trained. If set and has the *prepare_decoder_input_ids_from_labels*, use it to
            prepare the *decoder_input_ids*
            This is useful when using *label_smoothing* to avoid calculating loss twice.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
            Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
            among:
            - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
            sequence is provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
            acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths).
        max_length (`int`, *optional*):
            Maximum length of the returned list and optionally padding length (see above).
        pad_to_multiple_of (`int`, *optional*):
            If set will pad the sequence to a multiple of the provided value.
            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
            7.5 (Volta).
        label_pad_token_id (`int`, *optional*, defaults to -100):
            The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
        return_tensors (`str`):
            The type of Tensor to return. Allowable values are "np", "pt" and "tf".
    """
    tokenizer: PreTrainedTokenizerBase
    model: Optional[Any] = None
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def __call__(self, features, return_tensors=None):
        if return_tensors is None:
            return_tensors = self.return_tensors
        labels = [feature["labels"] for feature in features] if "labels" in features[0].keys() else None
        labels_neg = [feature["labels_neg"] for feature in features] if "labels_neg" in features[0].keys() else None
        # We have to pad the labels before calling `tokenizer.pad` as this method won't pad them and needs them of the
        # same length to return tensors.
        # Default so `tokenizer.pad` below never sees an undefined value when
        # the batch carries no "labels" (the original code raised NameError
        # in that case). With labels present, this is overwritten below.
        max_label_length = self.max_length
        if labels is not None:
            # Positive and negative sequences are padded to one shared length.
            max_label_length = max(len(l) for l in labels)
            if labels_neg is not None:
                max_label_length_neg = max(len(l) for l in labels_neg)
                max_label_length = max(max_label_length, max_label_length_neg)
            if self.pad_to_multiple_of is not None:
                # Round up to the next multiple (Tensor Core alignment).
                max_label_length = (
                    (max_label_length + self.pad_to_multiple_of - 1)
                    // self.pad_to_multiple_of
                    * self.pad_to_multiple_of
                )
            padding_side = self.tokenizer.padding_side
            for feature in features:
                # Wrap the scalar weights in one-element lists — presumably so
                # `tokenizer.pad` keeps them as per-example batch columns
                # (TODO confirm against the training loop).
                feature['weight_like'] = [feature['weight_like']]
                feature['weight_unlike'] = [feature['weight_unlike']]
                # Labels pad with label_pad_token_id; negative input ids pad
                # with the tokenizer pad token and attention mask with 0.
                remainder = [self.label_pad_token_id] * (max_label_length - len(feature["labels"]))
                remainder_length = max_label_length - len(feature["labels_neg"])
                remainder_label = [self.label_pad_token_id] * remainder_length
                remainder_ids = [self.tokenizer.pad_token_id] * remainder_length
                remainder_mask = [0] * remainder_length
                if isinstance(feature["labels"], list):
                    feature["labels"] = (
                        feature["labels"] + remainder if padding_side == "right" else remainder + feature["labels"]
                    )
                    feature["labels_neg"] = (
                        feature["labels_neg"] + remainder_label if padding_side == "right" else remainder_label + feature["labels_neg"]
                    )
                    feature["input_ids_neg"] = (
                        feature["input_ids_neg"] + remainder_ids if padding_side == "right" else remainder_ids + feature["input_ids_neg"]
                    )
                    feature["attention_mask_neg"] = (
                        feature["attention_mask_neg"] + remainder_mask if padding_side == "right" else remainder_mask + feature["attention_mask_neg"]
                    )
                elif padding_side == "right":
                    feature["labels"] = np.concatenate([feature["labels"], remainder]).astype(np.int64)
                    feature["labels_neg"] = np.concatenate([feature["labels_neg"], remainder_label]).astype(np.int64)
                    feature["input_ids_neg"] = np.concatenate([feature["input_ids_neg"], remainder_ids]).astype(np.int64)
                    feature["attention_mask_neg"] = np.concatenate([feature["attention_mask_neg"], remainder_mask]).astype(np.int64)
                else:
                    feature["labels"] = np.concatenate([remainder, feature["labels"]]).astype(np.int64)
                    feature["labels_neg"] = np.concatenate([remainder_label, feature["labels_neg"]]).astype(np.int64)
                    feature["input_ids_neg"] = np.concatenate([remainder_ids, feature["input_ids_neg"]]).astype(np.int64)
                    feature["attention_mask_neg"] = np.concatenate([remainder_mask, feature["attention_mask_neg"]]).astype(np.int64)
        # Pad the tokenizer-managed fields (input_ids, attention_mask, ...)
        # up to the same common length.
        features = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=max_label_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors=return_tensors,
        )
        # prepare decoder_input_ids (encoder-decoder models only)
        if (
            labels is not None
            and self.model is not None
            and hasattr(self.model, "prepare_decoder_input_ids_from_labels")
        ):
            decoder_input_ids = self.model.prepare_decoder_input_ids_from_labels(labels=features["labels"])
            features["decoder_input_ids"] = decoder_input_ids
        return features
class SavePeftModelCallback(TrainerCallback):
    """Trainer callback that writes PEFT adapter weights at each checkpoint."""

    def on_save(
        self,
        args: TrainingArguments,
        state: TrainerState,
        control: TrainerControl,
        **kwargs,
    ):
        # Save the adapters into the standard checkpoint-<step> directory.
        ckpt_dir = os.path.join(
            args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}"
        )
        kwargs["model"].save_pretrained(ckpt_dir)
        # Write an empty pytorch_model.bin — presumably so checkpoint tooling
        # finds a model file without duplicating base weights (confirm).
        torch.save({}, os.path.join(ckpt_dir, "pytorch_model.bin"))
        return control
class LoadBestPeftModelCallback(TrainerCallback):
    """Trainer callback that restores the best adapter weights at train end."""

    def on_train_end(
        self,
        args: TrainingArguments,
        state: TrainerState,
        control: TrainerControl,
        **kwargs,
    ):
        print(f"Loading best peft model from {state.best_model_checkpoint} (score: {state.best_metric}).")
        # Load the adapter state dict from the best checkpoint and apply it
        # to the live model in place.
        adapter_path = os.path.join(state.best_model_checkpoint, "adapter_model.bin")
        adapter_state = torch.load(adapter_path)
        set_peft_model_state_dict(kwargs["model"], adapter_state)
        return control
def get_peft_model(model, peft_config, adapter_name: str = "default"):
    """
    Wrap ``model`` in a PEFT model object for ``peft_config``.

    Like ``peft.get_peft_model``, except that supported task types are
    wrapped in the project's ``PeftModelForCausalLM``.
    Args:
        model ([`transformers.PreTrainedModel`]): Model to be wrapped.
        peft_config ([`PeftConfig`]): Configuration object containing the parameters of the Peft model.
    """
    model_config = getattr(model, "config", {"model_type": "custom"})
    if hasattr(model_config, "to_dict"):
        model_config = model_config.to_dict()
    peft_config.base_model_name_or_path = model.__dict__.get("name_or_path", None)

    task_is_known = peft_config.task_type in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys()
    if not task_is_known and not peft_config.is_prompt_learning:
        # No dedicated PEFT class for this task type: use the generic wrapper.
        return PeftModel(model, peft_config, adapter_name=adapter_name)
    if peft_config.is_prompt_learning:
        peft_config = _prepare_prompt_learning_config(peft_config, model_config)
    return PeftModelForCausalLM(model, peft_config, adapter_name=adapter_name)
def train(
# model/data params
base_model: str = "",
data_path: str = "",
output_dir: str = "",
# training hyperparams
batch_size: int = 128,
micro_batch_size: int = 8,
num_epochs: int = 1,
learning_rate: float = 3e-4,
cutoff_len: int = 4096,
val_set_size: int = 0,
lr_scheduler: str = "cosine",
warmup_steps: int = 100,
# lora hyperparams
lora_r: int = 16,
lora_alpha: int = 16,
lora_dropout: float = 0.05,
# from peft docs: ["q_proj", "k_proj", "v_proj", "o_proj", "fc_in", "fc_out", "wte", "gate_proj", "down_proj", "up_proj"]
lora_target_modules: List[str] = ["gate_proj", "down_proj", "up_proj"],
# llm hyperparams
train_on_inputs: bool = False, # if False, masks out inputs in loss
add_eos_token: bool = False,
group_by_length: bool = False, # faster, but produces an odd training loss curve
# wandb params
wandb_project: str = "",
wandb_run_name: str = "",
wandb_watch: str = "", # options: false | gradients | all
wandb_log_model: str = "", # options: false | true
resume_from_checkpoint: str = None, # either training checkpoint or final adapter
prompt_template_name: str = "alpaca",
weight_unlike: float = 0.1,
threshold: float = 1.1,
downsample: float = -1,
debug: bool = False,
):
if int(os.environ.get("LOCAL_RANK", 0)) == 0:
print(
f"Params using prompt template {prompt_template_name}\n"
f"the unlikelihood weight for the incorrect token in the incorrect response: {weight_unlike}\n"
f"the threshold to determine the unlikelihood token: {threshold}\n"
f"downssample rate for Hindsight-P: {downsample}\n"
f"base_model: {base_model}\n"
f"data_path: {data_path}\n"
f"output_dir: {output_dir}\n"
f"batch_size: {batch_size}\n"
f"micro_batch_size: {micro_batch_size}\n"
f"num_epochs: {num_epochs}\n"
f"learning_rate: {learning_rate}\n"
f"cutoff_len: {cutoff_len}\n"
f"val_set_size: {val_set_size}\n"
f"lr_scheduler: {lr_scheduler}\n"
f"warmup_steps: {warmup_steps}\n"
f"lora_r: {lora_r}\n"
f"lora_alpha: {lora_alpha}\n"
f"lora_dropout: {lora_dropout}\n"
f"lora_target_modules: {lora_target_modules}\n"
f"train_on_inputs: {train_on_inputs}\n"
f"add_eos_token: {add_eos_token}\n"
f"group_by_length: {group_by_length}\n"
f"wandb_project: {wandb_project}\n"
f"wandb_run_name: {wandb_run_name}\n"
f"wandb_watch: {wandb_watch}\n"
f"wandb_log_model: {wandb_log_model}\n"
f"resume_from_checkpoint: {resume_from_checkpoint or False}\n"
)
assert (
base_model
), "Please specify a --base_model, e.g. --base_model='huggyllama/llama-7b'"
gradient_accumulation_steps = batch_size // micro_batch_size
| prompter = Prompter(prompt_template_name) | 2 | 2023-12-22 07:32:19+00:00 | 12k |
usail-hkust/LLMTSCS | utils/oneline.py | [
{
"identifier": "DIC_AGENTS",
"path": "utils/config.py",
"snippet": "DIC_AGENTS = {\n \"Random\": RandomAgent,\n \"Fixedtime\": FixedtimeAgent,\n \"MaxPressure\": MaxPressureAgent,\n \"EfficientMaxPressure\": EfficientMaxPressureAgent,\n \"AdvancedMaxPressure\": AdvancedMaxPressureAgent,\... | from .config import DIC_AGENTS
from .my_utils import merge, get_state, get_state_detail, eight_phase_list, dump_json
from copy import deepcopy
from .cityflow_env import CityFlowEnv
from .pipeline import path_check, copy_cityflow_file, copy_conf_file
from tqdm import tqdm
import os
import time
import numpy as np
import wandb
import threading | 8,027 | print("end reset")
current_time = self.env.get_current_time() # in seconds
all_config = merge(merge(self.dic_agent_conf, self.dic_path), self.dic_traffic_env_conf)
logger = wandb.init(
project=self.dic_traffic_env_conf['PROJECT_NAME'],
group=f"{self.dic_traffic_env_conf['MODEL_NAME']}-{self.roadnet}-{self.trafficflow}-{len(self.dic_traffic_env_conf['PHASE'])}_Phases",
name=f"round_{round}",
config=all_config,
)
start_time = time.time()
state_action_log = [[] for _ in range(len(state))]
while not done and current_time < total_run_cnt:
action_list = []
threads = []
for i in range(len(state)):
# log statistic state
intersection = self.env.intersection_dict[self.env.list_intersection[i].inter_name]
roads = deepcopy(intersection["roads"])
statistic_state, statistic_state_incoming, mean_speed = get_state_detail(roads, self.env)
state_action_log[i].append({"state": statistic_state, "state_incoming": statistic_state_incoming, "approaching_speed": mean_speed})
one_state = state[i]
count = step_num
if "ChatGPT" in self.dic_traffic_env_conf["MODEL_NAME"] or "open_llm" in self.dic_traffic_env_conf["MODEL_NAME"]:
thread = threading.Thread(target=self.agents[i].choose_action, args=(self.env,))
threads.append(thread)
else:
action = self.agents[i].choose_action(count, one_state)
action_list.append(action)
# multi-thread
if "ChatGPT" in self.dic_traffic_env_conf["MODEL_NAME"]:
for thread in threads:
thread.start()
for thread in tqdm(threads):
thread.join()
for i in range(len(state)):
action = self.agents[i].temp_action_logger
action_list.append(action)
# multi-thread
if "open_llm" in self.dic_traffic_env_conf["MODEL_NAME"]:
started_thread_id = []
thread_num = self.dic_traffic_env_conf["LLM_API_THREAD_NUM"] if not self.dic_agent_conf["WITH_EXTERNAL_API"] else 2
for i, thread in enumerate(tqdm(threads)):
thread.start()
started_thread_id.append(i)
if (i + 1) % thread_num == 0:
for t_id in started_thread_id:
threads[t_id].join()
started_thread_id = []
for i in range(len(state)):
action = self.agents[i].temp_action_logger
action_list.append(action)
next_state, reward, done, _ = self.env.step(action_list)
# log action
for i in range(len(state)):
state_action_log[i][-1]["action"] = eight_phase_list[action_list[i]]
f_memory = open(file_name_memory, "a")
# output to std out and file
memory_str = 'time = {0}\taction = {1}\tcurrent_phase = {2}\treward = {3}'.\
format(current_time, str(action_list), str([state[i]["cur_phase"][0] for i in range(len(state))]),
str(reward),)
f_memory.write(memory_str + "\n")
f_memory.close()
current_time = self.env.get_current_time() # in seconds
state = next_state
step_num += 1
# calculate logger results
total_reward += sum(reward)
queue_length_inter = []
for inter in self.env.list_intersection:
queue_length_inter.append(sum(inter.dic_feature['lane_num_waiting_vehicle_in']))
queue_length_episode.append(sum(queue_length_inter))
# waiting time
waiting_times = []
for veh in self.env.waiting_vehicle_list:
waiting_times.append(self.env.waiting_vehicle_list[veh]['time'])
waiting_time_episode.append(np.mean(waiting_times) if len(waiting_times) > 0 else 0.0)
# wandb logger
vehicle_travel_times = {}
for inter in self.env.list_intersection:
arrive_left_times = inter.dic_vehicle_arrive_leave_time
for veh in arrive_left_times:
if "shadow" in veh:
continue
enter_time = arrive_left_times[veh]["enter_time"]
leave_time = arrive_left_times[veh]["leave_time"]
if not np.isnan(enter_time):
leave_time = leave_time if not np.isnan(leave_time) else self.dic_traffic_env_conf["RUN_COUNTS"]
if veh not in vehicle_travel_times:
vehicle_travel_times[veh] = [leave_time - enter_time]
else:
vehicle_travel_times[veh].append(leave_time - enter_time)
total_travel_time = np.mean([sum(vehicle_travel_times[veh]) for veh in vehicle_travel_times])
results = {
"test_reward_over": total_reward,
"test_avg_queue_len_over": np.mean(queue_length_episode) if len(queue_length_episode) > 0 else 0,
"test_queuing_vehicle_num_over": np.sum(queue_length_episode) if len(queue_length_episode) > 0 else 0,
"test_avg_waiting_time_over": np.mean(waiting_time_episode) if len(queue_length_episode) > 0 else 0,
"test_avg_travel_time_over": total_travel_time}
logger.log(results)
print(results)
f_state_action = os.path.join(self.dic_path["PATH_TO_WORK_DIRECTORY"], "state_action.json")
|
class OneLine:
def __init__(self, dic_agent_conf, dic_traffic_env_conf, dic_path, roadnet, trafficflow):
self.dic_agent_conf = dic_agent_conf
self.dic_traffic_env_conf = dic_traffic_env_conf
self.dic_path = dic_path
self.agents = []
self.env = None
self.roadnet = roadnet
self.trafficflow = trafficflow
self.models = []
self.initialize()
    def initialize(self):
        """Prepare the work directory, build the CityFlow env, and create one agent per intersection."""
        # Create/validate output directories and snapshot config plus
        # roadnet/flow files into the work directory.
        path_check(self.dic_path)
        copy_conf_file(self.dic_path, self.dic_agent_conf, self.dic_traffic_env_conf)
        copy_cityflow_file(self.dic_path, self.dic_traffic_env_conf)
        self.env = CityFlowEnv(
            path_to_log=self.dic_path["PATH_TO_WORK_DIRECTORY"],
            path_to_work_directory=self.dic_path["PATH_TO_WORK_DIRECTORY"],
            dic_traffic_env_conf=self.dic_traffic_env_conf,
            dic_path=self.dic_path
        )
        self.env.reset()
        agent_name = self.dic_traffic_env_conf["MODEL_NAME"]
        for i in range(self.dic_traffic_env_conf['NUM_INTERSECTIONS']):
            # LLM-backed agents receive per-intersection metadata; classic
            # agents are built from the config dictionaries only.
            if "ChatGPT" in agent_name:
                # Agent key is the part of the model name before the first "-".
                agent = DIC_AGENTS[agent_name.split("-")[0]](
                    GPT_version=self.dic_agent_conf["GPT_VERSION"],
                    intersection=self.env.intersection_dict[self.env.list_intersection[i].inter_name],
                    inter_name=self.env.list_intersection[i].inter_name,
                    phase_num=len(self.env.list_intersection[i].list_phases),
                    log_dir=self.dic_agent_conf["LOG_DIR"],
                    dataset=f"{self.roadnet}-{self.trafficflow}"
                )
            elif "open_llm" in agent_name:
                # Model name is expected to look like "open_llm-<model>".
                agent = DIC_AGENTS[agent_name.split("-")[0]](
                    ex_api=self.dic_agent_conf["WITH_EXTERNAL_API"],
                    model=agent_name.split("-")[1],
                    intersection=self.env.intersection_dict[self.env.list_intersection[i].inter_name],
                    inter_name=self.env.list_intersection[i].inter_name,
                    phase_num=len(self.env.list_intersection[i].list_phases),
                    log_dir=self.dic_agent_conf["LOG_DIR"],
                    dataset=f"{self.roadnet}-{self.trafficflow}"
                )
            else:
                agent = DIC_AGENTS[agent_name](
                    dic_agent_conf=self.dic_agent_conf,
                    dic_traffic_env_conf=self.dic_traffic_env_conf,
                    dic_path=self.dic_path,
                    cnt_round=0,
                    intersection_id=str(i)
                )
            self.agents.append(agent)
def train(self, round):
print("================ start train ================")
total_run_cnt = self.dic_traffic_env_conf["RUN_COUNTS"]
# initialize output streams
file_name_memory = os.path.join(self.dic_path["PATH_TO_WORK_DIRECTORY"], "memories.txt")
done = False
state = self.env.reset()
total_reward = 0.0
queue_length_episode = []
waiting_time_episode = []
step_num = 0
print("end reset")
current_time = self.env.get_current_time() # in seconds
all_config = merge(merge(self.dic_agent_conf, self.dic_path), self.dic_traffic_env_conf)
logger = wandb.init(
project=self.dic_traffic_env_conf['PROJECT_NAME'],
group=f"{self.dic_traffic_env_conf['MODEL_NAME']}-{self.roadnet}-{self.trafficflow}-{len(self.dic_traffic_env_conf['PHASE'])}_Phases",
name=f"round_{round}",
config=all_config,
)
start_time = time.time()
state_action_log = [[] for _ in range(len(state))]
while not done and current_time < total_run_cnt:
action_list = []
threads = []
for i in range(len(state)):
# log statistic state
intersection = self.env.intersection_dict[self.env.list_intersection[i].inter_name]
roads = deepcopy(intersection["roads"])
statistic_state, statistic_state_incoming, mean_speed = get_state_detail(roads, self.env)
state_action_log[i].append({"state": statistic_state, "state_incoming": statistic_state_incoming, "approaching_speed": mean_speed})
one_state = state[i]
count = step_num
if "ChatGPT" in self.dic_traffic_env_conf["MODEL_NAME"] or "open_llm" in self.dic_traffic_env_conf["MODEL_NAME"]:
thread = threading.Thread(target=self.agents[i].choose_action, args=(self.env,))
threads.append(thread)
else:
action = self.agents[i].choose_action(count, one_state)
action_list.append(action)
# multi-thread
if "ChatGPT" in self.dic_traffic_env_conf["MODEL_NAME"]:
for thread in threads:
thread.start()
for thread in tqdm(threads):
thread.join()
for i in range(len(state)):
action = self.agents[i].temp_action_logger
action_list.append(action)
# multi-thread
if "open_llm" in self.dic_traffic_env_conf["MODEL_NAME"]:
started_thread_id = []
thread_num = self.dic_traffic_env_conf["LLM_API_THREAD_NUM"] if not self.dic_agent_conf["WITH_EXTERNAL_API"] else 2
for i, thread in enumerate(tqdm(threads)):
thread.start()
started_thread_id.append(i)
if (i + 1) % thread_num == 0:
for t_id in started_thread_id:
threads[t_id].join()
started_thread_id = []
for i in range(len(state)):
action = self.agents[i].temp_action_logger
action_list.append(action)
next_state, reward, done, _ = self.env.step(action_list)
# log action
for i in range(len(state)):
state_action_log[i][-1]["action"] = eight_phase_list[action_list[i]]
f_memory = open(file_name_memory, "a")
# output to std out and file
memory_str = 'time = {0}\taction = {1}\tcurrent_phase = {2}\treward = {3}'.\
format(current_time, str(action_list), str([state[i]["cur_phase"][0] for i in range(len(state))]),
str(reward),)
f_memory.write(memory_str + "\n")
f_memory.close()
current_time = self.env.get_current_time() # in seconds
state = next_state
step_num += 1
# calculate logger results
total_reward += sum(reward)
queue_length_inter = []
for inter in self.env.list_intersection:
queue_length_inter.append(sum(inter.dic_feature['lane_num_waiting_vehicle_in']))
queue_length_episode.append(sum(queue_length_inter))
# waiting time
waiting_times = []
for veh in self.env.waiting_vehicle_list:
waiting_times.append(self.env.waiting_vehicle_list[veh]['time'])
waiting_time_episode.append(np.mean(waiting_times) if len(waiting_times) > 0 else 0.0)
# wandb logger
vehicle_travel_times = {}
for inter in self.env.list_intersection:
arrive_left_times = inter.dic_vehicle_arrive_leave_time
for veh in arrive_left_times:
if "shadow" in veh:
continue
enter_time = arrive_left_times[veh]["enter_time"]
leave_time = arrive_left_times[veh]["leave_time"]
if not np.isnan(enter_time):
leave_time = leave_time if not np.isnan(leave_time) else self.dic_traffic_env_conf["RUN_COUNTS"]
if veh not in vehicle_travel_times:
vehicle_travel_times[veh] = [leave_time - enter_time]
else:
vehicle_travel_times[veh].append(leave_time - enter_time)
total_travel_time = np.mean([sum(vehicle_travel_times[veh]) for veh in vehicle_travel_times])
results = {
"test_reward_over": total_reward,
"test_avg_queue_len_over": np.mean(queue_length_episode) if len(queue_length_episode) > 0 else 0,
"test_queuing_vehicle_num_over": np.sum(queue_length_episode) if len(queue_length_episode) > 0 else 0,
"test_avg_waiting_time_over": np.mean(waiting_time_episode) if len(queue_length_episode) > 0 else 0,
"test_avg_travel_time_over": total_travel_time}
logger.log(results)
print(results)
f_state_action = os.path.join(self.dic_path["PATH_TO_WORK_DIRECTORY"], "state_action.json") | dump_json(state_action_log, f_state_action) | 1 | 2023-12-26 08:31:47+00:00 | 12k |
KyanChen/TTP | tests/test_datasets/test_transform.py | [
{
"identifier": "LoadBiomedicalData",
"path": "mmseg/datasets/transforms/loading.py",
"snippet": "class LoadBiomedicalData(BaseTransform):\n \"\"\"Load an biomedical image and annotation from file.\n\n The loading data format is as the following:\n\n .. code-block:: python\n\n {\n ... | import copy
import os.path as osp
import mmcv
import numpy as np
import pytest
from unittest import TestCase
from mmengine.registry import init_default_scope
from PIL import Image
from mmseg.datasets.transforms import * # noqa
from mmseg.datasets.transforms import (LoadBiomedicalData,
LoadBiomedicalImageFromFile,
PhotoMetricDistortion, RandomCrop,
RandomDepthMix)
from mmseg.registry import TRANSFORMS
from mmseg.datasets.transforms import (LoadBiomedicalAnnotation,
LoadBiomedicalImageFromFile)
from mmseg.datasets.transforms import LoadBiomedicalImageFromFile
from mmseg.datasets.transforms import LoadBiomedicalImageFromFile
from mmseg.datasets.transforms import LoadBiomedicalImageFromFile | 7,544 | results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
results = transform(results)
mean = np.array(img_norm_cfg['mean'])
std = np.array(img_norm_cfg['std'])
converted_img = (original_img[..., ::-1] - mean) / std
assert np.allclose(results['img'], converted_img)
def test_random_crop():
# test assertion for invalid random crop
with pytest.raises(AssertionError):
RandomCrop(crop_size=(-1, 0))
results = dict()
img = mmcv.imread(osp.join('tests/data/color.jpg'), 'color')
seg = np.array(Image.open(osp.join('tests/data/seg.png')))
results['img'] = img
results['gt_semantic_seg'] = seg
results['seg_fields'] = ['gt_semantic_seg']
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
h, w, _ = img.shape
pipeline = RandomCrop(crop_size=(h - 20, w - 20))
results = pipeline(results)
assert results['img'].shape[:2] == (h - 20, w - 20)
assert results['img_shape'] == (h - 20, w - 20)
assert results['gt_semantic_seg'].shape[:2] == (h - 20, w - 20)
def test_rgb2gray():
# test assertion out_channels should be greater than 0
with pytest.raises(AssertionError):
transform = dict(type='RGB2Gray', out_channels=-1)
TRANSFORMS.build(transform)
# test assertion weights should be tuple[float]
with pytest.raises(AssertionError):
transform = dict(type='RGB2Gray', out_channels=1, weights=1.1)
TRANSFORMS.build(transform)
# test out_channels is None
transform = dict(type='RGB2Gray')
transform = TRANSFORMS.build(transform)
assert str(transform) == f'RGB2Gray(' \
f'out_channels={None}, ' \
f'weights={(0.299, 0.587, 0.114)})'
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
h, w, c = img.shape
seg = np.array(
Image.open(osp.join(osp.dirname(__file__), '../data/seg.png')))
results['img'] = img
results['gt_semantic_seg'] = seg
results['seg_fields'] = ['gt_semantic_seg']
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
results = transform(results)
assert results['img'].shape == (h, w, c)
assert results['img_shape'] == (h, w, c)
assert results['ori_shape'] == (h, w, c)
# test out_channels = 2
transform = dict(type='RGB2Gray', out_channels=2)
transform = TRANSFORMS.build(transform)
assert str(transform) == f'RGB2Gray(' \
f'out_channels={2}, ' \
f'weights={(0.299, 0.587, 0.114)})'
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
h, w, c = img.shape
seg = np.array(
Image.open(osp.join(osp.dirname(__file__), '../data/seg.png')))
results['img'] = img
results['gt_semantic_seg'] = seg
results['seg_fields'] = ['gt_semantic_seg']
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
results = transform(results)
assert results['img'].shape == (h, w, 2)
assert results['img_shape'] == (h, w, 2)
def test_photo_metric_distortion():
results = dict()
img = mmcv.imread(osp.join('tests/data/color.jpg'), 'color')
seg = np.array(Image.open(osp.join('tests/data/seg.png')))
results['img'] = img
results['gt_semantic_seg'] = seg
results['seg_fields'] = ['gt_semantic_seg']
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
| # Copyright (c) OpenMMLab. All rights reserved.
init_default_scope('mmseg')
def test_resize():
# Test `Resize`, `RandomResize` and `RandomChoiceResize` from
# MMCV transform. Noted: `RandomResize` has args `scales` but
# `Resize` and `RandomResize` has args `scale`.
transform = dict(type='Resize', scale=(1333, 800), keep_ratio=True)
resize_module = TRANSFORMS.build(transform)
results = dict()
# (288, 512, 3)
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
resized_results = resize_module(results.copy())
# img_shape = results['img'].shape[:2] in ``MMCV resize`` function
# so right now it is (750, 1333) rather than (750, 1333, 3)
assert resized_results['img_shape'] == (750, 1333)
# test keep_ratio=False
transform = dict(
type='RandomResize',
scale=(1280, 800),
ratio_range=(1.0, 1.0),
resize_type='Resize',
keep_ratio=False)
resize_module = TRANSFORMS.build(transform)
resized_results = resize_module(results.copy())
assert resized_results['img_shape'] == (800, 1280)
# test `RandomChoiceResize`, which in older mmsegmentation
# `Resize` is multiscale_mode='range'
transform = dict(type='RandomResize', scale=[(1333, 400), (1333, 1200)])
resize_module = TRANSFORMS.build(transform)
resized_results = resize_module(results.copy())
assert max(resized_results['img_shape'][:2]) <= 1333
assert min(resized_results['img_shape'][:2]) >= 400
assert min(resized_results['img_shape'][:2]) <= 1200
# test RandomChoiceResize, which in older mmsegmentation
# `Resize` is multiscale_mode='value'
transform = dict(
type='RandomChoiceResize',
scales=[(1333, 800), (1333, 400)],
resize_type='Resize',
keep_ratio=False)
resize_module = TRANSFORMS.build(transform)
resized_results = resize_module(results.copy())
assert resized_results['img_shape'] in [(800, 1333), (400, 1333)]
transform = dict(type='Resize', scale_factor=(0.9, 1.1), keep_ratio=True)
resize_module = TRANSFORMS.build(transform)
resized_results = resize_module(results.copy())
assert max(resized_results['img_shape'][:2]) <= 1333 * 1.1
# test RandomChoiceResize, which `resize_type` is `ResizeShortestEdge`
transform = dict(
type='RandomChoiceResize',
scales=[128, 256, 512],
resize_type='ResizeShortestEdge',
max_size=1333)
resize_module = TRANSFORMS.build(transform)
resized_results = resize_module(results.copy())
assert resized_results['img_shape'][0] in [128, 256, 512]
transform = dict(
type='RandomChoiceResize',
scales=[512],
resize_type='ResizeShortestEdge',
max_size=512)
resize_module = TRANSFORMS.build(transform)
resized_results = resize_module(results.copy())
assert resized_results['img_shape'][1] == 512
transform = dict(
type='RandomChoiceResize',
scales=[(128, 256), (256, 512), (512, 1024)],
resize_type='ResizeShortestEdge',
max_size=1333)
resize_module = TRANSFORMS.build(transform)
resized_results = resize_module(results.copy())
assert resized_results['img_shape'][0] in [128, 256, 512]
# test scale=None and scale_factor is tuple.
# img shape: (288, 512, 3)
transform = dict(
type='Resize', scale=None, scale_factor=(0.5, 2.0), keep_ratio=True)
resize_module = TRANSFORMS.build(transform)
resized_results = resize_module(results.copy())
assert int(288 * 0.5) <= resized_results['img_shape'][0] <= 288 * 2.0
assert int(512 * 0.5) <= resized_results['img_shape'][1] <= 512 * 2.0
# test minimum resized image shape is 640
transform = dict(type='Resize', scale=(2560, 640), keep_ratio=True)
resize_module = TRANSFORMS.build(transform)
resized_results = resize_module(results.copy())
assert resized_results['img_shape'] == (640, 1138)
# test minimum resized image shape is 640 when img_scale=(512, 640)
# where should define `scale_factor` in MMCV new ``Resize`` function.
min_size_ratio = max(640 / img.shape[0], 640 / img.shape[1])
transform = dict(
type='Resize', scale_factor=min_size_ratio, keep_ratio=True)
resize_module = TRANSFORMS.build(transform)
resized_results = resize_module(results.copy())
assert resized_results['img_shape'] == (640, 1138)
# test h > w
img = np.random.randn(512, 288, 3)
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
min_size_ratio = max(640 / img.shape[0], 640 / img.shape[1])
transform = dict(
type='Resize',
scale=(2560, 640),
scale_factor=min_size_ratio,
keep_ratio=True)
resize_module = TRANSFORMS.build(transform)
resized_results = resize_module(results.copy())
assert resized_results['img_shape'] == (1138, 640)
def test_flip():
# test assertion for invalid prob
with pytest.raises(AssertionError):
transform = dict(type='RandomFlip', prob=1.5)
TRANSFORMS.build(transform)
# test assertion for invalid direction
with pytest.raises(AssertionError):
transform = dict(type='RandomFlip', prob=1.0, direction='horizonta')
TRANSFORMS.build(transform)
transform = dict(type='RandomFlip', prob=1.0)
flip_module = TRANSFORMS.build(transform)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
original_img = copy.deepcopy(img)
seg = np.array(
Image.open(osp.join(osp.dirname(__file__), '../data/seg.png')))
original_seg = copy.deepcopy(seg)
results['img'] = img
results['gt_semantic_seg'] = seg
results['seg_fields'] = ['gt_semantic_seg']
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
results = flip_module(results)
flip_module = TRANSFORMS.build(transform)
results = flip_module(results)
assert np.equal(original_img, results['img']).all()
assert np.equal(original_seg, results['gt_semantic_seg']).all()
results['gt_depth_map'] = seg
results['seg_fields'] = ['gt_depth_map']
results = flip_module(results)
flip_module = TRANSFORMS.build(transform)
results = flip_module(results)
assert np.equal(original_img, results['img']).all()
assert np.equal(original_seg, results['gt_depth_map']).all()
def test_random_rotate_flip():
with pytest.raises(AssertionError):
transform = dict(type='RandomRotFlip', flip_prob=1.5)
TRANSFORMS.build(transform)
with pytest.raises(AssertionError):
transform = dict(type='RandomRotFlip', rotate_prob=1.5)
TRANSFORMS.build(transform)
with pytest.raises(AssertionError):
transform = dict(type='RandomRotFlip', degree=[20, 20, 20])
TRANSFORMS.build(transform)
with pytest.raises(AssertionError):
transform = dict(type='RandomRotFlip', degree=-20)
TRANSFORMS.build(transform)
transform = dict(
type='RandomRotFlip', flip_prob=1.0, rotate_prob=0, degree=20)
rot_flip_module = TRANSFORMS.build(transform)
results = dict()
img = mmcv.imread(
osp.join(
osp.dirname(__file__),
'../data/pseudo_synapse_dataset/img_dir/case0005_slice000.jpg'),
'color')
original_img = copy.deepcopy(img)
seg = np.array(
Image.open(
osp.join(
osp.dirname(__file__),
'../data/pseudo_synapse_dataset/ann_dir/case0005_slice000.png')
))
original_seg = copy.deepcopy(seg)
results['img'] = img
results['gt_semantic_seg'] = seg
results['seg_fields'] = ['gt_semantic_seg']
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
result_flip = rot_flip_module(results)
assert original_img.shape == result_flip['img'].shape
assert original_seg.shape == result_flip['gt_semantic_seg'].shape
transform = dict(
type='RandomRotFlip', flip_prob=0, rotate_prob=1.0, degree=20)
rot_flip_module = TRANSFORMS.build(transform)
result_rotate = rot_flip_module(results)
assert original_img.shape == result_rotate['img'].shape
assert original_seg.shape == result_rotate['gt_semantic_seg'].shape
assert str(transform) == "{'type': 'RandomRotFlip'," \
" 'flip_prob': 0," \
" 'rotate_prob': 1.0," \
" 'degree': 20}"
def test_pad():
# test assertion if both size_divisor and size is None
with pytest.raises(AssertionError):
transform = dict(type='Pad')
TRANSFORMS.build(transform)
transform = dict(type='Pad', size_divisor=32)
transform = TRANSFORMS.build(transform)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
original_img = copy.deepcopy(img)
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
results = transform(results)
# original img already divisible by 32
assert np.equal(results['img'], original_img).all()
img_shape = results['img'].shape
assert img_shape[0] % 32 == 0
assert img_shape[1] % 32 == 0
def test_normalize():
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True)
transform = dict(type='Normalize', **img_norm_cfg)
transform = TRANSFORMS.build(transform)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
original_img = copy.deepcopy(img)
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
results = transform(results)
mean = np.array(img_norm_cfg['mean'])
std = np.array(img_norm_cfg['std'])
converted_img = (original_img[..., ::-1] - mean) / std
assert np.allclose(results['img'], converted_img)
def test_random_crop():
# test assertion for invalid random crop
with pytest.raises(AssertionError):
RandomCrop(crop_size=(-1, 0))
results = dict()
img = mmcv.imread(osp.join('tests/data/color.jpg'), 'color')
seg = np.array(Image.open(osp.join('tests/data/seg.png')))
results['img'] = img
results['gt_semantic_seg'] = seg
results['seg_fields'] = ['gt_semantic_seg']
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
h, w, _ = img.shape
pipeline = RandomCrop(crop_size=(h - 20, w - 20))
results = pipeline(results)
assert results['img'].shape[:2] == (h - 20, w - 20)
assert results['img_shape'] == (h - 20, w - 20)
assert results['gt_semantic_seg'].shape[:2] == (h - 20, w - 20)
def test_rgb2gray():
# test assertion out_channels should be greater than 0
with pytest.raises(AssertionError):
transform = dict(type='RGB2Gray', out_channels=-1)
TRANSFORMS.build(transform)
# test assertion weights should be tuple[float]
with pytest.raises(AssertionError):
transform = dict(type='RGB2Gray', out_channels=1, weights=1.1)
TRANSFORMS.build(transform)
# test out_channels is None
transform = dict(type='RGB2Gray')
transform = TRANSFORMS.build(transform)
assert str(transform) == f'RGB2Gray(' \
f'out_channels={None}, ' \
f'weights={(0.299, 0.587, 0.114)})'
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
h, w, c = img.shape
seg = np.array(
Image.open(osp.join(osp.dirname(__file__), '../data/seg.png')))
results['img'] = img
results['gt_semantic_seg'] = seg
results['seg_fields'] = ['gt_semantic_seg']
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
results = transform(results)
assert results['img'].shape == (h, w, c)
assert results['img_shape'] == (h, w, c)
assert results['ori_shape'] == (h, w, c)
# test out_channels = 2
transform = dict(type='RGB2Gray', out_channels=2)
transform = TRANSFORMS.build(transform)
assert str(transform) == f'RGB2Gray(' \
f'out_channels={2}, ' \
f'weights={(0.299, 0.587, 0.114)})'
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
h, w, c = img.shape
seg = np.array(
Image.open(osp.join(osp.dirname(__file__), '../data/seg.png')))
results['img'] = img
results['gt_semantic_seg'] = seg
results['seg_fields'] = ['gt_semantic_seg']
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
results = transform(results)
assert results['img'].shape == (h, w, 2)
assert results['img_shape'] == (h, w, 2)
def test_photo_metric_distortion():
results = dict()
img = mmcv.imread(osp.join('tests/data/color.jpg'), 'color')
seg = np.array(Image.open(osp.join('tests/data/seg.png')))
results['img'] = img
results['gt_semantic_seg'] = seg
results['seg_fields'] = ['gt_semantic_seg']
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
| pipeline = PhotoMetricDistortion(saturation_range=(1., 1.)) | 2 | 2023-12-23 08:36:47+00:00 | 12k |
SkierProjects/MultiLabelImageClassificationPytorch | src/utils/training/train_model.py | [
{
"identifier": "LoggerFactory",
"path": "src/utils/logging/loggerfactory.py",
"snippet": "class LoggerFactory:\n DEFAULT_LOG_LEVEL = logging.INFO\n LOG_FILE_MAX_BYTES = 10 * 1024 * 1024 # 10 MB\n LOG_FILE_BACKUP_COUNT = 5 # Keep 5 backup files\n LONG_LOG_FORMAT = \"%(asctime)s - %(name)s ... | from config import config
from src.utils.logging.loggerfactory import LoggerFactory
from src.utils.training.modeltrainer import ModelTrainer
from src.utils.evaluation.modelevaluator import ModelEvaluator
from src.utils.evaluation.test_model import evaluate_model
import torch
import utils.dataset.datasetutils as datasetutils | 8,344 | logger = LoggerFactory.get_logger(f"logger.{__name__}")
def train_model(config=config):
"""
Train a model based on the provided configuration.
Parameters:
config: Configuration module with necessary attributes.
"""
# Initialize the computation device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Get train, validation, and test dataset loaders
train_loader, valid_loader, test_loader = datasetutils.get_train_valid_test_loaders(config=config)
# Initialize the model trainer
| logger = LoggerFactory.get_logger(f"logger.{__name__}")
def train_model(config=config):
"""
Train a model based on the provided configuration.
Parameters:
config: Configuration module with necessary attributes.
"""
# Initialize the computation device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Get train, validation, and test dataset loaders
train_loader, valid_loader, test_loader = datasetutils.get_train_valid_test_loaders(config=config)
# Initialize the model trainer | with ModelTrainer(device, train_loader, valid_loader, test_loader, config=config) as modelTrainer, ModelEvaluator.from_trainer(modelTrainer) as modelEvaluator: | 2 | 2023-12-25 18:45:52+00:00 | 12k |
jpivarski/ragged | src/ragged/_spec_searching_functions.py | [
{
"identifier": "_box",
"path": "src/ragged/_spec_array_object.py",
"snippet": "def _box(\n cls: type[array],\n output: ak.Array | np.number | SupportsDLPack,\n *,\n dtype: None | Dtype = None,\n) -> array:\n if isinstance(output, ak.Array):\n impl = output\n shape, dtype_ob... | import awkward as ak
import numpy as np
from ._spec_array_object import _box, _unbox, array | 10,499 | # BSD 3-Clause License; see https://github.com/scikit-hep/ragged/blob/main/LICENSE
"""
https://data-apis.org/array-api/latest/API_specification/searching_functions.html
"""
from __future__ import annotations
def _remove_optiontype(x: ak.contents.Content) -> ak.contents.Content:
if x.is_list:
return x.copy(content=_remove_optiontype(x.content))
elif x.is_option:
return x.content
else:
return x
def argmax(x: array, /, *, axis: None | int = None, keepdims: bool = False) -> array:
"""
Returns the indices of the maximum values along a specified axis.
When the maximum value occurs multiple times, only the indices
corresponding to the first occurrence are returned.
Args:
x: Input array.
axis: Axis along which to search. If `None`, the function returns the
index of the maximum value of the flattened array.
keepdims: If `True`, the reduced axes (dimensions) are included in the
result as singleton dimensions, and, accordingly, the result is
broadcastable with the input array. Otherwise, if `False`, the
reduced axes (dimensions) are not included in the result.
Returns:
If `axis` is `None`, a zero-dimensional array containing the index of
the first occurrence of the maximum value; otherwise, a
non-zero-dimensional array containing the indices of the maximum
values. The returned array has data type `np.int64`.
https://data-apis.org/array-api/latest/API_specification/generated/array_api.argmax.html
"""
out = np.argmax(*_unbox(x), axis=axis, keepdims=keepdims)
if out is None:
msg = "cannot compute argmax of an array with no data"
raise ValueError(msg)
if isinstance(out, ak.Array):
if ak.any(ak.is_none(out, axis=-1)):
msg = f"cannot compute argmax at axis={axis} because some lists at this depth have zero length"
raise ValueError(msg)
out = ak.Array(
_remove_optiontype(out.layout), behavior=out.behavior, attrs=out.attrs
)
| # BSD 3-Clause License; see https://github.com/scikit-hep/ragged/blob/main/LICENSE
"""
https://data-apis.org/array-api/latest/API_specification/searching_functions.html
"""
from __future__ import annotations
def _remove_optiontype(x: ak.contents.Content) -> ak.contents.Content:
if x.is_list:
return x.copy(content=_remove_optiontype(x.content))
elif x.is_option:
return x.content
else:
return x
def argmax(x: array, /, *, axis: None | int = None, keepdims: bool = False) -> array:
"""
Returns the indices of the maximum values along a specified axis.
When the maximum value occurs multiple times, only the indices
corresponding to the first occurrence are returned.
Args:
x: Input array.
axis: Axis along which to search. If `None`, the function returns the
index of the maximum value of the flattened array.
keepdims: If `True`, the reduced axes (dimensions) are included in the
result as singleton dimensions, and, accordingly, the result is
broadcastable with the input array. Otherwise, if `False`, the
reduced axes (dimensions) are not included in the result.
Returns:
If `axis` is `None`, a zero-dimensional array containing the index of
the first occurrence of the maximum value; otherwise, a
non-zero-dimensional array containing the indices of the maximum
values. The returned array has data type `np.int64`.
https://data-apis.org/array-api/latest/API_specification/generated/array_api.argmax.html
"""
out = np.argmax(*_unbox(x), axis=axis, keepdims=keepdims)
if out is None:
msg = "cannot compute argmax of an array with no data"
raise ValueError(msg)
if isinstance(out, ak.Array):
if ak.any(ak.is_none(out, axis=-1)):
msg = f"cannot compute argmax at axis={axis} because some lists at this depth have zero length"
raise ValueError(msg)
out = ak.Array(
_remove_optiontype(out.layout), behavior=out.behavior, attrs=out.attrs
)
| return _box(type(x), out) | 0 | 2023-12-26 10:53:35+00:00 | 12k |
see2023/Bert-VITS2-ext | onnx_modules/V200/text/chinese.py | [
{
"identifier": "punctuation",
"path": "onnx_modules/V200/text/symbols.py",
"snippet": ""
},
{
"identifier": "ToneSandhi",
"path": "onnx_modules/V200/text/tone_sandhi.py",
"snippet": "class ToneSandhi:\n def __init__(self):\n self.must_neural_tone_words = {\n \"麻烦\",... | import os
import re
import cn2an
import jieba.posseg as psg
from pypinyin import lazy_pinyin, Style
from .symbols import punctuation
from .tone_sandhi import ToneSandhi
from text import chinese_bert
from text.chinese_bert import get_bert_feature | 7,598 |
current_file_path = os.path.dirname(__file__)
pinyin_to_symbol_map = {
line.split("\t")[0]: line.strip().split("\t")[1]
for line in open(os.path.join(current_file_path, "opencpop-strict.txt")).readlines()
}
rep_map = {
":": ",",
";": ",",
",": ",",
"。": ".",
"!": "!",
"?": "?",
"\n": ".",
"·": ",",
"、": ",",
"...": "…",
"$": ".",
"“": "'",
"”": "'",
"‘": "'",
"’": "'",
"(": "'",
")": "'",
"(": "'",
")": "'",
"《": "'",
"》": "'",
"【": "'",
"】": "'",
"[": "'",
"]": "'",
"—": "-",
"~": "-",
"~": "-",
"「": "'",
"」": "'",
}
|
current_file_path = os.path.dirname(__file__)
pinyin_to_symbol_map = {
line.split("\t")[0]: line.strip().split("\t")[1]
for line in open(os.path.join(current_file_path, "opencpop-strict.txt")).readlines()
}
rep_map = {
":": ",",
";": ",",
",": ",",
"。": ".",
"!": "!",
"?": "?",
"\n": ".",
"·": ",",
"、": ",",
"...": "…",
"$": ".",
"“": "'",
"”": "'",
"‘": "'",
"’": "'",
"(": "'",
")": "'",
"(": "'",
")": "'",
"《": "'",
"》": "'",
"【": "'",
"】": "'",
"[": "'",
"]": "'",
"—": "-",
"~": "-",
"~": "-",
"「": "'",
"」": "'",
}
| tone_modifier = ToneSandhi() | 1 | 2023-12-27 03:09:11+00:00 | 12k |
chinhsuanwu/ifusion-threestudio | threestudio/models/geometry/base.py | [
{
"identifier": "IsosurfaceHelper",
"path": "threestudio/models/isosurface.py",
"snippet": "class IsosurfaceHelper(nn.Module):\n points_range: Tuple[float, float] = (0, 1)\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"N 3\"]:\n raise NotImplementedError"
},
{
"identi... | from dataclasses import dataclass, field
from threestudio.models.isosurface import (
IsosurfaceHelper,
MarchingCubeCPUHelper,
MarchingTetrahedraHelper,
)
from threestudio.models.mesh import Mesh
from threestudio.utils.base import BaseModule
from threestudio.utils.ops import chunk_batch, scale_tensor
from threestudio.utils.typing import *
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio | 7,670 |
def contract_to_unisphere(
x: Float[Tensor, "... 3"], bbox: Float[Tensor, "2 3"], unbounded: bool = False
) -> Float[Tensor, "... 3"]:
if unbounded:
x = scale_tensor(x, bbox, (0, 1))
x = x * 2 - 1 # aabb is at [-1, 1]
mag = x.norm(dim=-1, keepdim=True)
mask = mag.squeeze(-1) > 1
x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])
x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]
else:
x = scale_tensor(x, bbox, (0, 1))
return x
class BaseGeometry(BaseModule):
@dataclass
class Config(BaseModule.Config):
pass
cfg: Config
@staticmethod
def create_from(
other: "BaseGeometry", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs
) -> "BaseGeometry":
raise TypeError(
f"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}"
)
def export(self, *args, **kwargs) -> Dict[str, Any]:
return {}
class BaseImplicitGeometry(BaseGeometry):
@dataclass
class Config(BaseGeometry.Config):
radius: float = 1.0
isosurface: bool = True
isosurface_method: str = "mt"
isosurface_resolution: int = 128
isosurface_threshold: Union[float, str] = 0.0
isosurface_chunk: int = 0
isosurface_coarse_to_fine: bool = True
isosurface_deformable_grid: bool = False
isosurface_remove_outliers: bool = True
isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01
cfg: Config
def configure(self) -> None:
self.bbox: Float[Tensor, "2 3"]
self.register_buffer(
"bbox",
torch.as_tensor(
[
[-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],
[self.cfg.radius, self.cfg.radius, self.cfg.radius],
],
dtype=torch.float32,
),
)
self.isosurface_helper: Optional[IsosurfaceHelper] = None
self.unbounded: bool = False
def _initilize_isosurface_helper(self):
if self.cfg.isosurface and self.isosurface_helper is None:
if self.cfg.isosurface_method == "mc-cpu":
self.isosurface_helper = MarchingCubeCPUHelper(
self.cfg.isosurface_resolution
).to(self.device)
elif self.cfg.isosurface_method == "mt":
|
def contract_to_unisphere(
x: Float[Tensor, "... 3"], bbox: Float[Tensor, "2 3"], unbounded: bool = False
) -> Float[Tensor, "... 3"]:
if unbounded:
x = scale_tensor(x, bbox, (0, 1))
x = x * 2 - 1 # aabb is at [-1, 1]
mag = x.norm(dim=-1, keepdim=True)
mask = mag.squeeze(-1) > 1
x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])
x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]
else:
x = scale_tensor(x, bbox, (0, 1))
return x
class BaseGeometry(BaseModule):
@dataclass
class Config(BaseModule.Config):
pass
cfg: Config
@staticmethod
def create_from(
other: "BaseGeometry", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs
) -> "BaseGeometry":
raise TypeError(
f"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}"
)
def export(self, *args, **kwargs) -> Dict[str, Any]:
return {}
class BaseImplicitGeometry(BaseGeometry):
@dataclass
class Config(BaseGeometry.Config):
radius: float = 1.0
isosurface: bool = True
isosurface_method: str = "mt"
isosurface_resolution: int = 128
isosurface_threshold: Union[float, str] = 0.0
isosurface_chunk: int = 0
isosurface_coarse_to_fine: bool = True
isosurface_deformable_grid: bool = False
isosurface_remove_outliers: bool = True
isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01
cfg: Config
def configure(self) -> None:
self.bbox: Float[Tensor, "2 3"]
self.register_buffer(
"bbox",
torch.as_tensor(
[
[-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],
[self.cfg.radius, self.cfg.radius, self.cfg.radius],
],
dtype=torch.float32,
),
)
self.isosurface_helper: Optional[IsosurfaceHelper] = None
self.unbounded: bool = False
def _initilize_isosurface_helper(self):
if self.cfg.isosurface and self.isosurface_helper is None:
if self.cfg.isosurface_method == "mc-cpu":
self.isosurface_helper = MarchingCubeCPUHelper(
self.cfg.isosurface_resolution
).to(self.device)
elif self.cfg.isosurface_method == "mt": | self.isosurface_helper = MarchingTetrahedraHelper( | 2 | 2023-12-27 20:30:33+00:00 | 12k |
gardenifi/server | tests/raspi/mqtt_test.py | [
{
"identifier": "Mqtt",
"path": "app/raspi/mqtt.py",
"snippet": "class Mqtt:\n \"\"\"MQTT Methods Class.\"\"\"\n\n __instance = None\n __lock = threading.Lock()\n client = None\n\n def __new__(cls):\n \"\"\"\n Create a new instance of the Mqtt class using the singleton desig... | import threading
import os
from app.raspi.mqtt import Mqtt
from app.raspi.helpers import Helpers
from app.raspi.const import (
MQTT_TOPIC_STATUS,
MQTT_STATUS_ERR,
MQTT_END,
MQTT_TOPIC_CMD,
MQTT_TOPIC_VALVES,
MQTT_CLIENT_ID,
MQTT_USER,
MQTT_PASS,
MQTT_HOST,
MQTT_PORT,
STATUSES_FILE,
) | 9,685 |
def test_mqtt_singleton(self):
"""
Test that Mqtt object is a singleton.
"""
mqtt_instance1 = Mqtt()
mqtt_instance2 = Mqtt()
assert mqtt_instance1 is mqtt_instance2
def test_mqtt_destroy_instance(self):
"""
Test that Mqtt object can be destroyed.
"""
mqtt_instance = Mqtt()
mqtt_instance.destroy_instance()
assert mqtt_instance.get_mqtt_thread() is None
assert mqtt_instance.get_periodic_updates_thread() is None
def test_mqtt_set_and_get_thread(self):
"""
Test that Mqtt thread can be set and retrieved.
"""
def dummy_target_function():
pass
mqtt_instance = Mqtt()
thread = threading.Thread(target=dummy_target_function)
mqtt_instance.set_mqtt_thread(thread)
assert mqtt_instance.get_mqtt_thread() is thread
def test_mqtt_on_disconnect(self, mocker):
"""
Test that MQTT OnDisconnect method is called.
"""
mqtt_instance = Mqtt()
client = mocker.Mock()
data = mocker.Mock()
return_code = 0
mqtt_instance.on_disconnect(client, data, return_code)
client.connected_flag = False
assert client.connected_flag is False
def test_mqtt_on_connect_non_zero_result_code(self, mocker):
"""
Test that MQTT OnConnect method returns a non-zero result code.
"""
mqtt_instance = Mqtt()
client = mocker.Mock()
userdata = mocker.Mock()
flags = mocker.Mock()
return_code = 1
mqtt_instance.on_connect(client, userdata, flags, return_code)
assert client.connected_flag is True
def test_mqtt_handle_valves_exception(self, mocker):
"""
Test that MQTT HandleValves method raises an exception.
"""
mqtt_instance = Mqtt()
client = mocker.Mock()
data = mocker.Mock()
mocker.patch.object(Helpers, "set_valves", side_effect=Exception("Test Exception"))
mqtt_instance.handle_valves(client, data)
client.publish.assert_called_with(MQTT_TOPIC_STATUS, MQTT_STATUS_ERR + "Test Exception" + MQTT_END, qos=2, retain=True)
def test_on_connect_subscribes_to_topics(self, mocker):
"""
Test that MQTT OnConnect method subscribes to topics.
"""
mqtt_instance = Mqtt()
client_mock = mocker.Mock()
userdata_mock = mocker.Mock()
flags_mock = mocker.Mock()
return_code = 0
mqtt_instance.on_connect(client_mock, userdata_mock, flags_mock, return_code)
client_mock.subscribe.assert_called_with(MQTT_TOPIC_VALVES)
def test_on_connect_starts_periodic_updates_thread(self, mocker):
"""
Test that MQTT OnConnect method starts periodic updates thread.
"""
mqtt_instance = Mqtt()
client_mock = mocker.Mock()
userdata_mock = mocker.Mock()
flags_mock = mocker.Mock()
return_code = 0
mqtt_instance.on_connect(client_mock, userdata_mock, flags_mock, return_code)
assert mqtt_instance.get_periodic_updates_thread().is_alive() is True
def test_on_message_handles_commands(self, mocker):
"""
Test that MQTT OnMessage method handles valves, config, command, and sys commands.
"""
mqtt_instance = Mqtt()
client_mock = mocker.Mock()
userdata_mock = mocker.Mock()
msg_mock = mocker.Mock()
msg_mock.topic = MQTT_TOPIC_CMD
msg_mock.payload.decode.return_value = '{"cmd": 1, "out": 1}'
mqtt_instance.on_message(client_mock, userdata_mock, msg_mock)
assert os.path.exists(STATUSES_FILE), f"The file '{STATUSES_FILE}' does not exist."
def test_mqtt_init(self, mocker):
"""
Test that MQTT Init method initializes MQTT client and connects to the broker.
"""
# Mock the necessary dependencies
mocker.patch("app.raspi.mqtt.logger")
mock_mqtt = mocker.patch("app.raspi.mqtt.mqtt.Client")
mock_client = mock_mqtt.return_value
mock_services = mocker.patch("app.raspi.mqtt.Services")
mock_services.return_value.load_program_cycles_if_exists.side_effect = [None, {"program": "data"}]
# Create an instance of Mqtt and call the mqtt_init method
mqtt_instance = Mqtt()
mqtt_instance.mqtt_init()
# Assert that the necessary methods were called
| """MIT License
Copyright (c) 2023, Marios Karagiannopoulos
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
**Attribution Requirement:**
When using or distributing the software, an attribution to Marios Karagiannopoulos must be included.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
class TestMqtt:
"""
Unit tests for the Mqtt class.
"""
def test_mqtt_singleton(self):
"""
Test that Mqtt object is a singleton.
"""
mqtt_instance1 = Mqtt()
mqtt_instance2 = Mqtt()
assert mqtt_instance1 is mqtt_instance2
def test_mqtt_destroy_instance(self):
"""
Test that Mqtt object can be destroyed.
"""
mqtt_instance = Mqtt()
mqtt_instance.destroy_instance()
assert mqtt_instance.get_mqtt_thread() is None
assert mqtt_instance.get_periodic_updates_thread() is None
def test_mqtt_set_and_get_thread(self):
"""
Test that Mqtt thread can be set and retrieved.
"""
def dummy_target_function():
pass
mqtt_instance = Mqtt()
thread = threading.Thread(target=dummy_target_function)
mqtt_instance.set_mqtt_thread(thread)
assert mqtt_instance.get_mqtt_thread() is thread
def test_mqtt_on_disconnect(self, mocker):
"""
Test that MQTT OnDisconnect method is called.
"""
mqtt_instance = Mqtt()
client = mocker.Mock()
data = mocker.Mock()
return_code = 0
mqtt_instance.on_disconnect(client, data, return_code)
client.connected_flag = False
assert client.connected_flag is False
def test_mqtt_on_connect_non_zero_result_code(self, mocker):
"""
Test that MQTT OnConnect method returns a non-zero result code.
"""
mqtt_instance = Mqtt()
client = mocker.Mock()
userdata = mocker.Mock()
flags = mocker.Mock()
return_code = 1
mqtt_instance.on_connect(client, userdata, flags, return_code)
assert client.connected_flag is True
def test_mqtt_handle_valves_exception(self, mocker):
"""
Test that MQTT HandleValves method raises an exception.
"""
mqtt_instance = Mqtt()
client = mocker.Mock()
data = mocker.Mock()
mocker.patch.object(Helpers, "set_valves", side_effect=Exception("Test Exception"))
mqtt_instance.handle_valves(client, data)
client.publish.assert_called_with(MQTT_TOPIC_STATUS, MQTT_STATUS_ERR + "Test Exception" + MQTT_END, qos=2, retain=True)
def test_on_connect_subscribes_to_topics(self, mocker):
"""
Test that MQTT OnConnect method subscribes to topics.
"""
mqtt_instance = Mqtt()
client_mock = mocker.Mock()
userdata_mock = mocker.Mock()
flags_mock = mocker.Mock()
return_code = 0
mqtt_instance.on_connect(client_mock, userdata_mock, flags_mock, return_code)
client_mock.subscribe.assert_called_with(MQTT_TOPIC_VALVES)
def test_on_connect_starts_periodic_updates_thread(self, mocker):
"""
Test that MQTT OnConnect method starts periodic updates thread.
"""
mqtt_instance = Mqtt()
client_mock = mocker.Mock()
userdata_mock = mocker.Mock()
flags_mock = mocker.Mock()
return_code = 0
mqtt_instance.on_connect(client_mock, userdata_mock, flags_mock, return_code)
assert mqtt_instance.get_periodic_updates_thread().is_alive() is True
def test_on_message_handles_commands(self, mocker):
"""
Test that MQTT OnMessage method handles valves, config, command, and sys commands.
"""
mqtt_instance = Mqtt()
client_mock = mocker.Mock()
userdata_mock = mocker.Mock()
msg_mock = mocker.Mock()
msg_mock.topic = MQTT_TOPIC_CMD
msg_mock.payload.decode.return_value = '{"cmd": 1, "out": 1}'
mqtt_instance.on_message(client_mock, userdata_mock, msg_mock)
assert os.path.exists(STATUSES_FILE), f"The file '{STATUSES_FILE}' does not exist."
def test_mqtt_init(self, mocker):
"""
Test that MQTT Init method initializes MQTT client and connects to the broker.
"""
# Mock the necessary dependencies
mocker.patch("app.raspi.mqtt.logger")
mock_mqtt = mocker.patch("app.raspi.mqtt.mqtt.Client")
mock_client = mock_mqtt.return_value
mock_services = mocker.patch("app.raspi.mqtt.Services")
mock_services.return_value.load_program_cycles_if_exists.side_effect = [None, {"program": "data"}]
# Create an instance of Mqtt and call the mqtt_init method
mqtt_instance = Mqtt()
mqtt_instance.mqtt_init()
# Assert that the necessary methods were called | mock_mqtt.assert_called_with(client_id=MQTT_CLIENT_ID, clean_session=True) | 7 | 2023-12-22 08:06:09+00:00 | 12k |
bclavie/RAGatouille | ragatouille/RAGTrainer.py | [
{
"identifier": "LateInteractionModel",
"path": "ragatouille/models/base.py",
"snippet": "class LateInteractionModel(ABC):\n @abstractmethod\n def __init__(\n self,\n pretrained_model_name_or_path: Union[str, Path],\n n_gpu,\n ):\n ...\n\n @abstractmethod\n def... | from pathlib import Path
from typing import Union, Literal, Optional
from colbert.infra import ColBERTConfig
from ragatouille.models import LateInteractionModel, ColBERT
from ragatouille.negative_miners import HardNegativeMiner, SimpleMiner
from ragatouille.utils import seeded_shuffle
from ragatouille.data import TrainingDataProcessor | 7,432 |
class RAGTrainer:
"""Main trainer to fine-tune/train ColBERT models with a few lines."""
model: Union[LateInteractionModel, None] = None
negative_miner: Union[HardNegativeMiner, None] = None
collection: list[str] = []
queries: Union[list[str], None] = None
raw_data: Union[list[tuple], list[list], None] = None
training_triplets: list[list[int]] = list()
def __init__(
self,
model_name: str,
pretrained_model_name: str,
language_code: str = "en",
n_usable_gpus: int = -1,
):
"""
Initialise a RAGTrainer instance. This will load a base model: either an existing ColBERT model to fine-tune or a BERT/RoBERTa-like model to build a new ColBERT model from.
Parameters:
model_name: str - Name of the model to train. This will be used to name the checkpoints and the index.
pretrained_model_name: str - Name of the pretrained model to use as a base. Can be a local path to a checkpoint or a huggingface model name.
language_code: str - Language code of the model to train. This will be used to name the checkpoints and the index.
n_usable_gpus: int - Number of GPUs to use. By default, value is -1, which means use all available GPUs or none if no GPU is available.
Returns:
self (RAGTrainer): The current instance of RAGTrainer, with the base model initialised.
"""
self.model_name = model_name
self.pretrained_model_name = pretrained_model_name
self.language_code = language_code
self.model = ColBERT(
pretrained_model_name_or_path=pretrained_model_name, n_gpu=n_usable_gpus
)
def add_documents(self, documents: list[str]):
self.collection += documents
seeded_shuffle(self.collection)
def export_training_data(self, path: Union[str, Path]):
"""
Manually export the training data processed by prepare_training_data to a given path.
Parameters:
path: Union[str, Path] - Path to the directory where the data will be exported."""
self.data_processor.export_training_data(path)
def prepare_training_data(
self,
raw_data: Union[list[tuple], list[list]],
all_documents: Optional[list[str]] = None,
data_out_path: Union[str, Path] = "./data/",
num_new_negatives: int = 10,
hard_negative_minimum_rank: int = 10,
mine_hard_negatives: bool = True,
hard_negative_model_size: str = "small",
pairs_with_labels: bool = False,
positive_label: Union[int, str] = 1,
negative_label: Union[int, str] = 0,
) -> str:
"""
Fully pre-process input-data in various raw formats into ColBERT-ready files and triplets.
Will accept a variety of formats, such as unannotated pairs, annotated pairs, triplets of strings and triplets of list of strings.
Will process into a ColBERT-ready format and export to data_out_path.
Will generate hard negatives if mine_hard_negatives is True.
num_new_negatives decides how many negatives will be generated. if mine_hard_negatives is False and num_new_negatives is > 0, these negatives will be randomly sampled.
Parameters:
raw_data: Union[list[tuple], list[list]] - List of pairs, annotated pairs, or triplets of strings.
all_documents: Optional[list[str]] - A corpus of documents to be used for sampling negatives.
data_out_path: Union[str, Path] - Path to the directory where the data will be exported (can be a tmp directory).
num_new_negatives: int - Number of new negatives to generate for each query.
mine_hard_negatives: bool - Whether to use hard negatives mining or not.
hard_negative_model_size: str - Size of the model to use for hard negatives mining.
pairs_with_labels: bool - Whether the raw_data is a list of pairs with labels or not.
positive_label: Union[int, str] - Label to use for positive pairs.
negative_label: Union[int, str] - Label to use for negative pairs.
Returns:
data_out_path: Union[str, Path] - Path to the directory where the data has been exported.
"""
if all_documents is not None:
self.collection += all_documents
self.data_dir = Path(data_out_path)
if len(raw_data[0]) == 2:
data_type = "pairs"
if pairs_with_labels:
data_type = "labeled_pairs"
elif len(raw_data[0]) == 3:
data_type = "triplets"
else:
raise ValueError("Raw data must be a list of pairs or triplets of strings.")
self.collection += [x[1] for x in raw_data]
if data_type == "triplets":
self.collection += [x[2] for x in raw_data]
self.queries = set([x[0] for x in raw_data])
self.collection = list(set(self.collection))
seeded_shuffle(self.collection)
if mine_hard_negatives:
|
class RAGTrainer:
"""Main trainer to fine-tune/train ColBERT models with a few lines."""
model: Union[LateInteractionModel, None] = None
negative_miner: Union[HardNegativeMiner, None] = None
collection: list[str] = []
queries: Union[list[str], None] = None
raw_data: Union[list[tuple], list[list], None] = None
training_triplets: list[list[int]] = list()
def __init__(
self,
model_name: str,
pretrained_model_name: str,
language_code: str = "en",
n_usable_gpus: int = -1,
):
"""
Initialise a RAGTrainer instance. This will load a base model: either an existing ColBERT model to fine-tune or a BERT/RoBERTa-like model to build a new ColBERT model from.
Parameters:
model_name: str - Name of the model to train. This will be used to name the checkpoints and the index.
pretrained_model_name: str - Name of the pretrained model to use as a base. Can be a local path to a checkpoint or a huggingface model name.
language_code: str - Language code of the model to train. This will be used to name the checkpoints and the index.
n_usable_gpus: int - Number of GPUs to use. By default, value is -1, which means use all available GPUs or none if no GPU is available.
Returns:
self (RAGTrainer): The current instance of RAGTrainer, with the base model initialised.
"""
self.model_name = model_name
self.pretrained_model_name = pretrained_model_name
self.language_code = language_code
self.model = ColBERT(
pretrained_model_name_or_path=pretrained_model_name, n_gpu=n_usable_gpus
)
def add_documents(self, documents: list[str]):
self.collection += documents
seeded_shuffle(self.collection)
def export_training_data(self, path: Union[str, Path]):
"""
Manually export the training data processed by prepare_training_data to a given path.
Parameters:
path: Union[str, Path] - Path to the directory where the data will be exported."""
self.data_processor.export_training_data(path)
def prepare_training_data(
self,
raw_data: Union[list[tuple], list[list]],
all_documents: Optional[list[str]] = None,
data_out_path: Union[str, Path] = "./data/",
num_new_negatives: int = 10,
hard_negative_minimum_rank: int = 10,
mine_hard_negatives: bool = True,
hard_negative_model_size: str = "small",
pairs_with_labels: bool = False,
positive_label: Union[int, str] = 1,
negative_label: Union[int, str] = 0,
) -> str:
"""
Fully pre-process input-data in various raw formats into ColBERT-ready files and triplets.
Will accept a variety of formats, such as unannotated pairs, annotated pairs, triplets of strings and triplets of list of strings.
Will process into a ColBERT-ready format and export to data_out_path.
Will generate hard negatives if mine_hard_negatives is True.
num_new_negatives decides how many negatives will be generated. if mine_hard_negatives is False and num_new_negatives is > 0, these negatives will be randomly sampled.
Parameters:
raw_data: Union[list[tuple], list[list]] - List of pairs, annotated pairs, or triplets of strings.
all_documents: Optional[list[str]] - A corpus of documents to be used for sampling negatives.
data_out_path: Union[str, Path] - Path to the directory where the data will be exported (can be a tmp directory).
num_new_negatives: int - Number of new negatives to generate for each query.
mine_hard_negatives: bool - Whether to use hard negatives mining or not.
hard_negative_model_size: str - Size of the model to use for hard negatives mining.
pairs_with_labels: bool - Whether the raw_data is a list of pairs with labels or not.
positive_label: Union[int, str] - Label to use for positive pairs.
negative_label: Union[int, str] - Label to use for negative pairs.
Returns:
data_out_path: Union[str, Path] - Path to the directory where the data has been exported.
"""
if all_documents is not None:
self.collection += all_documents
self.data_dir = Path(data_out_path)
if len(raw_data[0]) == 2:
data_type = "pairs"
if pairs_with_labels:
data_type = "labeled_pairs"
elif len(raw_data[0]) == 3:
data_type = "triplets"
else:
raise ValueError("Raw data must be a list of pairs or triplets of strings.")
self.collection += [x[1] for x in raw_data]
if data_type == "triplets":
self.collection += [x[2] for x in raw_data]
self.queries = set([x[0] for x in raw_data])
self.collection = list(set(self.collection))
seeded_shuffle(self.collection)
if mine_hard_negatives: | self.negative_miner = SimpleMiner( | 3 | 2023-12-29 16:26:42+00:00 | 12k |
shibing624/chatgpt-webui | main.py | [
{
"identifier": "http_proxy",
"path": "src/config.py",
"snippet": "def retrieve_openai_api(api_key=None):\ndef retrieve_proxy(proxy=None):\ndef update_doc_config(two_column_pdf):"
},
{
"identifier": "get_model",
"path": "src/models.py",
"snippet": "def get_model(\n model_name,\n ... | import gradio as gr
from loguru import logger
from src.config import (
http_proxy,
hide_history_when_not_logged_in,
chat_name_method_index,
my_api_key, multi_api_key, server_name,
server_port, share, config_file, api_host,
authflag,
dockerflag,
show_api_billing,
latex_delimiters_set,
user_avatar, bot_avatar,
update_doc_config,
)
from src.models import get_model
from src.overwrites import (
postprocess, postprocess_chat_messages,
reload_javascript, get_html,
)
from src.presets import (
MODELS,
HISTORY_NAME_METHODS,
small_and_beautiful_theme,
CONCURRENT_COUNT,
CHUANHU_TITLE,
HIDE_MY_KEY,
DEFAULT_MODEL,
REPLY_LANGUAGES,
INITIAL_SYSTEM_PROMPT,
ENABLE_STREAMING_OPTION,
CHUANHU_DESCRIPTION,
favicon_path,
API_HOST,
HISTORY_DIR,
assets_path,
)
from src.utils import (
delete_chat_history,
filter_history,
get_history_list,
auto_name_chat_history,
get_template_dropdown,
rename_chat_history,
init_history_list,
get_first_history_name,
setup_wizard,
auth_from_conf,
get_geoip,
get_template_names,
load_template,
get_history_names,
reset,
predict,
interrupt,
retry,
i18n,
dislike,
toggle_like_btn_visibility,
set_key,
set_single_turn,
hide_middle_chars,
set_system_prompt,
start_outputing,
set_token_upper_limit,
set_temperature,
set_user_identifier,
set_top_p,
delete_first_conversation,
delete_last_conversation,
set_n_choices,
set_logit_bias,
load_chat_history,
end_outputing,
set_max_tokens,
reset_default,
reset_textbox,
set_stop_sequence,
set_presence_penalty, set_frequency_penalty,
upload_chat_history,
export_markdown,
billing_info,
get_template_content,
like,
transfer_input,
handle_file_upload,
handle_summarize_index,
) | 7,899 | regenerate_i18n=i18n("重新生成"),
deleteRound_i18n=i18n("删除这轮问答"),
renameChat_i18n=i18n("重命名该对话"),
validFileName_i18n=i18n("请输入有效的文件名,不要包含以下特殊字符:"),
clearFileHistoryMsg_i18n=i18n("⚠️请先删除知识库中的历史文件,再尝试上传!"),
dropUploadMsg_i18n=i18n("释放文件以上传"),
))
with gr.Box(elem_id="fake-gradio-components", visible=False):
changeSingleSessionBtn = gr.Button(
visible=False, elem_classes="invisible-btn", elem_id="change-single-session-btn")
historySelectBtn = gr.Button(
visible=False, elem_classes="invisible-btn", elem_id="history-select-btn") # Not used
def create_greeting(request: gr.Request):
if hasattr(request, "username") and request.username:
logger.info(f"Get User Name: {request.username}")
user_info, user_name = gr.Markdown.update(
value=f"User: {request.username}"), request.username
else:
user_info, user_name = gr.Markdown.update(
value=f"", visible=False), ""
current_model = get_model(
model_name=MODELS[DEFAULT_MODEL], access_key=my_api_key, user_name=user_name)[0]
if not hide_history_when_not_logged_in or user_name:
loaded_stuff = current_model.auto_load()
else:
loaded_stuff = [gr.update(), gr.update(), gr.Chatbot.update(label=MODELS[DEFAULT_MODEL]),
current_model.single_turn, current_model.temperature, current_model.top_p,
current_model.n_choices, current_model.stop_sequence, current_model.token_upper_limit,
current_model.max_generation_token, current_model.presence_penalty,
current_model.frequency_penalty, current_model.logit_bias, current_model.user_identifier]
return user_info, user_name, current_model, toggle_like_btn_visibility(
DEFAULT_MODEL), *loaded_stuff, init_history_list(user_name)
demo.load(create_greeting, inputs=None, outputs=[
user_info, user_name, current_model, like_dislike_area, saveFileName, systemPromptTxt, chatbot,
single_turn_checkbox, temperature_slider, top_p_slider, n_choices_slider, stop_sequence_txt,
max_context_length_slider, max_generation_slider, presence_penalty_slider, frequency_penalty_slider,
logit_bias_txt, user_identifier_txt, historySelectList], api_name="load")
chatgpt_predict_args = dict(
fn=predict,
inputs=[
current_model,
user_question,
chatbot,
use_streaming_checkbox,
use_websearch_checkbox,
index_files,
language_select_dropdown,
],
outputs=[chatbot, status_display],
show_progress=True,
)
start_outputing_args = dict(
fn=start_outputing,
inputs=[],
outputs=[submitBtn, cancelBtn],
show_progress=True,
)
end_outputing_args = dict(
fn=end_outputing, inputs=[], outputs=[submitBtn, cancelBtn]
)
reset_textbox_args = dict(
fn=reset_textbox, inputs=[], outputs=[user_input]
)
transfer_input_args = dict(
fn=transfer_input, inputs=[user_input], outputs=[
user_question, user_input, submitBtn, cancelBtn], show_progress=True
)
get_usage_args = dict(
fn=billing_info, inputs=[current_model], outputs=[
usageTxt], show_progress=False
)
load_history_from_file_args = dict(
fn=load_chat_history,
inputs=[current_model, historySelectList],
outputs=[saveFileName, systemPromptTxt, chatbot, single_turn_checkbox, temperature_slider, top_p_slider,
n_choices_slider, stop_sequence_txt, max_context_length_slider, max_generation_slider,
presence_penalty_slider, frequency_penalty_slider, logit_bias_txt, user_identifier_txt],
)
refresh_history_args = dict(
fn=get_history_list, inputs=[user_name], outputs=[historySelectList]
)
auto_name_chat_history_args = dict(
fn=auto_name_chat_history,
inputs=[current_model, name_chat_method, user_question, chatbot, single_turn_checkbox],
outputs=[historySelectList],
show_progress=False,
)
# Chatbot
cancelBtn.click(interrupt, [current_model], [])
user_input.submit(
**transfer_input_args).then(
**chatgpt_predict_args).then(
**end_outputing_args).then(
**auto_name_chat_history_args)
user_input.submit(**get_usage_args)
submitBtn.click(**transfer_input_args).then(
**chatgpt_predict_args, api_name="predict").then(
**end_outputing_args).then(
**auto_name_chat_history_args)
submitBtn.click(**get_usage_args)
index_files.upload(handle_file_upload, [current_model, index_files, chatbot, language_select_dropdown], [
index_files, chatbot, status_display])
summarize_btn.click(handle_summarize_index, [
current_model, index_files, chatbot, language_select_dropdown], [chatbot, status_display])
emptyBtn.click(
| # -*- coding: utf-8 -*-
"""
@author:XuMing(xuming624@qq.com)
@description:
"""
gr.Chatbot._postprocess_chat_messages = postprocess_chat_messages
gr.Chatbot.postprocess = postprocess
with gr.Blocks(theme=small_and_beautiful_theme) as demo:
user_name = gr.Textbox("", visible=False)
promptTemplates = gr.State(load_template(get_template_names()[0], mode=2))
user_question = gr.State("")
assert type(my_api_key) == str
user_api_key = gr.State(my_api_key)
current_model = gr.State()
topic = gr.State(i18n("未命名对话历史记录"))
with gr.Row(elem_id="chuanhu-header"):
gr.HTML(get_html("header_title.html").format(
app_title=CHUANHU_TITLE), elem_id="app-title")
status_display = gr.Markdown(get_geoip, elem_id="status-display")
with gr.Row(elem_id="float-display"):
user_info = gr.Markdown(
value="getting user info...", elem_id="user-info")
with gr.Row(equal_height=True, elem_id="chuanhu-body"):
with gr.Column(elem_id="menu-area"):
with gr.Column(elem_id="chuanhu-history"):
with gr.Box():
with gr.Row(elem_id="chuanhu-history-header"):
with gr.Row(elem_id="chuanhu-history-search-row"):
with gr.Column(min_width=150, scale=2):
historySearchTextbox = gr.Textbox(show_label=False, container=False, placeholder=i18n(
"搜索(支持正则)..."), lines=1, elem_id="history-search-tb")
with gr.Column(min_width=52, scale=1, elem_id="gr-history-header-btns"):
uploadFileBtn = gr.UploadButton(
interactive=True, label="", file_types=[".json"], elem_id="gr-history-upload-btn")
historyRefreshBtn = gr.Button("", elem_id="gr-history-refresh-btn")
with gr.Row(elem_id="chuanhu-history-body"):
with gr.Column(scale=6, elem_id="history-select-wrap"):
historySelectList = gr.Radio(
label=i18n("从列表中加载对话"),
choices=get_history_names(),
value=get_first_history_name(),
# multiselect=False,
container=False,
elem_id="history-select-dropdown"
)
with gr.Row(visible=False):
with gr.Column(min_width=42, scale=1):
historyDeleteBtn = gr.Button(
"🗑️", elem_id="gr-history-delete-btn")
with gr.Column(min_width=42, scale=1):
historyDownloadBtn = gr.Button(
"⏬", elem_id="gr-history-download-btn")
with gr.Column(min_width=42, scale=1):
historyMarkdownDownloadBtn = gr.Button(
"⤵️", elem_id="gr-history-mardown-download-btn")
with gr.Row(visible=False):
with gr.Column(scale=6):
saveFileName = gr.Textbox(
show_label=True,
placeholder=i18n("设置文件名: 默认为.json,可选为.md"),
label=i18n("设置保存文件名"),
value=i18n("对话历史记录"),
elem_classes="no-container"
# container=False,
)
with gr.Column(scale=1):
renameHistoryBtn = gr.Button(
i18n("💾 保存对话"), elem_id="gr-history-save-btn")
exportMarkdownBtn = gr.Button(
i18n("📝 导出为 Markdown"), elem_id="gr-markdown-export-btn")
with gr.Column(elem_id="chuanhu-menu-footer"):
with gr.Row(elem_id="chuanhu-func-nav"):
gr.HTML(get_html("func_nav.html"))
# gr.HTML(get_html("footer.html").format(versions=versions_html()), elem_id="footer")
# gr.Markdown(CHUANHU_DESCRIPTION, elem_id="chuanhu-author")
with gr.Column(elem_id="chuanhu-area", scale=5):
with gr.Column(elem_id="chatbot-area"):
with gr.Row(elem_id="chatbot-header"):
model_select_dropdown = gr.Dropdown(
label=i18n("选择模型"), choices=MODELS, multiselect=False, value=MODELS[DEFAULT_MODEL],
interactive=True,
show_label=False, container=False, elem_id="model-select-dropdown"
)
lora_select_dropdown = gr.Dropdown(
label=i18n("选择LoRA模型"), choices=[], multiselect=False, interactive=True,
container=False, visible=False,
)
gr.HTML(get_html("chatbot_header_btn.html").format(
json_label=i18n("历史记录(JSON)"),
md_label=i18n("导出为 Markdown")
), elem_id="chatbot-header-btn-bar")
with gr.Row():
chatbot = gr.Chatbot(
label="ChatGPT",
elem_id="chuanhu-chatbot",
latex_delimiters=latex_delimiters_set,
sanitize_html=False,
# height=700,
show_label=False,
avatar_images=[user_avatar, bot_avatar],
show_share_button=False,
)
with gr.Row(elem_id="chatbot-footer"):
with gr.Box(elem_id="chatbot-input-box"):
with gr.Row(elem_id="chatbot-input-row"):
gr.HTML(get_html("chatbot_more.html").format(
single_turn_label=i18n("单轮对话"),
websearch_label=i18n("在线搜索"),
upload_file_label=i18n("上传文件"),
uploaded_files_label=i18n("知识库文件"),
uploaded_files_tip=i18n("在工具箱中管理知识库文件")
))
with gr.Row(elem_id="chatbot-input-tb-row"):
with gr.Column(min_width=225, scale=12):
user_input = gr.Textbox(
elem_id="user-input-tb",
show_label=False,
placeholder=i18n("在这里输入"),
elem_classes="no-container",
max_lines=5,
# container=False
)
with gr.Column(min_width=42, scale=1, elem_id="chatbot-ctrl-btns"):
submitBtn = gr.Button(
value="", variant="primary", elem_id="submit-btn")
cancelBtn = gr.Button(
value="", variant="secondary", visible=False, elem_id="cancel-btn")
# Note: Buttons below are set invisible in UI. But they are used in JS.
with gr.Row(elem_id="chatbot-buttons", visible=False):
with gr.Column(min_width=120, scale=1):
emptyBtn = gr.Button(
i18n("🧹 新的对话"), elem_id="empty-btn"
)
with gr.Column(min_width=120, scale=1):
retryBtn = gr.Button(
i18n("🔄 重新生成"), elem_id="gr-retry-btn")
with gr.Column(min_width=120, scale=1):
delFirstBtn = gr.Button(i18n("🗑️ 删除最旧对话"))
with gr.Column(min_width=120, scale=1):
delLastBtn = gr.Button(
i18n("🗑️ 删除最新对话"), elem_id="gr-dellast-btn")
with gr.Row(visible=False) as like_dislike_area:
with gr.Column(min_width=20, scale=1):
likeBtn = gr.Button(
"👍", elem_id="gr-like-btn")
with gr.Column(min_width=20, scale=1):
dislikeBtn = gr.Button(
"👎", elem_id="gr-dislike-btn")
with gr.Column(elem_id="toolbox-area", scale=1):
# For CSS setting, there is an extra box. Don't remove it.
with gr.Box(elem_id="chuanhu-toolbox"):
with gr.Row():
gr.Markdown("## " + i18n("工具箱"))
gr.HTML(get_html("close_btn.html").format(
obj="toolbox"), elem_classes="close-btn")
with gr.Tabs(elem_id="chuanhu-toolbox-tabs"):
with gr.Tab(label=i18n("对话")):
with gr.Accordion(label=i18n("模型"), open=not HIDE_MY_KEY, visible=not HIDE_MY_KEY):
keyTxt = gr.Textbox(
show_label=True,
placeholder=f"Your API-key...",
value=hide_middle_chars(user_api_key.value),
type="password",
visible=not HIDE_MY_KEY,
label="API-Key",
)
if multi_api_key:
usageTxt = gr.Markdown(i18n(
"多账号模式已开启,无需输入key,可直接开始对话"), elem_id="usage-display",
elem_classes="insert-block", visible=show_api_billing)
else:
usageTxt = gr.Markdown(i18n(
"**发送消息** 或 **提交key** 以显示额度"), elem_id="usage-display",
elem_classes="insert-block", visible=show_api_billing)
gr.Markdown("---", elem_classes="hr-line", visible=not HIDE_MY_KEY)
with gr.Accordion(label="Prompt", open=True):
systemPromptTxt = gr.Textbox(
show_label=True,
placeholder=i18n("在这里输入System Prompt..."),
label="System prompt",
value=INITIAL_SYSTEM_PROMPT,
lines=8
)
retain_system_prompt_checkbox = gr.Checkbox(
label=i18n("新建对话保留Prompt"), value=False, visible=True,
elem_classes="switch-checkbox")
with gr.Accordion(label=i18n("加载Prompt模板"), open=False):
with gr.Column():
with gr.Row():
with gr.Column(scale=6):
templateFileSelectDropdown = gr.Dropdown(
label=i18n("选择Prompt模板集合文件"),
choices=get_template_names(),
multiselect=False,
value=get_template_names()[0],
container=False,
)
with gr.Column(scale=1):
templateRefreshBtn = gr.Button(
i18n("🔄 刷新"))
with gr.Row():
with gr.Column():
templateSelectDropdown = gr.Dropdown(
label=i18n("从Prompt模板中加载"),
choices=load_template(
get_template_names()[
0], mode=1
),
multiselect=False,
container=False,
)
gr.Markdown("---", elem_classes="hr-line")
with gr.Accordion(label=i18n("知识库"), open=True, elem_id="gr-kb-accordion", visible=True):
use_websearch_checkbox = gr.Checkbox(label=i18n(
"使用在线搜索"), value=False, elem_classes="switch-checkbox", elem_id="gr-websearch-cb",
visible=False)
index_files = gr.Files(label=i18n(
"上传"), type="file",
file_types=[".pdf", ".docx", ".pptx", ".epub", ".xlsx", ".txt", "text", "image"],
elem_id="upload-index-file")
two_column = gr.Checkbox(label=i18n(
"双栏pdf"), value=False)
summarize_btn = gr.Button(i18n("总结"), visible=False)
with gr.Tab(label=i18n("参数")):
gr.Markdown(i18n("# ⚠️ 务必谨慎更改 ⚠️"),
elem_id="advanced-warning")
with gr.Accordion(i18n("参数"), open=True):
temperature_slider = gr.Slider(
minimum=-0,
maximum=2.0,
value=1.0,
step=0.1,
interactive=True,
label="temperature",
)
top_p_slider = gr.Slider(
minimum=-0,
maximum=1.0,
value=1.0,
step=0.05,
interactive=True,
label="top-p",
)
n_choices_slider = gr.Slider(
minimum=1,
maximum=10,
value=1,
step=1,
interactive=True,
label="n choices",
)
stop_sequence_txt = gr.Textbox(
show_label=True,
placeholder=i18n("停止符,用英文逗号隔开..."),
label="stop",
value="",
lines=1,
)
max_context_length_slider = gr.Slider(
minimum=1,
maximum=32768,
value=2000,
step=1,
interactive=True,
label="max context",
)
max_generation_slider = gr.Slider(
minimum=1,
maximum=32768,
value=1000,
step=1,
interactive=True,
label="max generations",
)
presence_penalty_slider = gr.Slider(
minimum=-2.0,
maximum=2.0,
value=0.0,
step=0.01,
interactive=True,
label="presence penalty",
)
frequency_penalty_slider = gr.Slider(
minimum=-2.0,
maximum=2.0,
value=0.0,
step=0.01,
interactive=True,
label="frequency penalty",
)
logit_bias_txt = gr.Textbox(
show_label=True,
placeholder=f"word:likelihood",
label="logit bias",
value="",
lines=1,
)
user_identifier_txt = gr.Textbox(
show_label=True,
placeholder=i18n("用于定位滥用行为"),
label=i18n("用户标识符"),
value=user_name.value,
lines=1,
)
with gr.Tab(label=i18n("关于")):
gr.Markdown("#### " + i18n("ChatGPT WebUI"))
gr.Markdown(CHUANHU_DESCRIPTION)
with gr.Row(elem_id="popup-wrapper"):
with gr.Box(elem_id="chuanhu-popup"):
with gr.Box(elem_id="chuanhu-setting"):
with gr.Row():
gr.Markdown("## " + i18n("设置"))
gr.HTML(get_html("close_btn.html").format(
obj="box"), elem_classes="close-btn")
with gr.Tabs(elem_id="chuanhu-setting-tabs"):
with gr.Tab(label=i18n("高级")):
gr.HTML(get_html("appearance_switcher.html").format(
label=i18n("切换亮暗色主题")), elem_classes="insert-block", visible=False)
use_streaming_checkbox = gr.Checkbox(
label=i18n("实时传输回答"), value=True, visible=ENABLE_STREAMING_OPTION,
elem_classes="switch-checkbox"
)
language_select_dropdown = gr.Dropdown(
label=i18n("选择回复语言(针对搜索&索引功能)"),
choices=REPLY_LANGUAGES,
multiselect=False,
value=REPLY_LANGUAGES[0],
visible=False,
)
name_chat_method = gr.Dropdown(
label=i18n("对话命名方式"),
choices=HISTORY_NAME_METHODS,
multiselect=False,
interactive=True,
value=HISTORY_NAME_METHODS[chat_name_method_index],
)
single_turn_checkbox = gr.Checkbox(label=i18n(
"单轮对话"), value=False, elem_classes="switch-checkbox", elem_id="gr-single-session-cb",
visible=False)
# checkUpdateBtn = gr.Button(i18n("🔄 检查更新..."), visible=check_update)
with gr.Tab(i18n("网络")):
gr.Markdown(
i18n("⚠️ 为保证API-Key安全,请在配置文件`config.json`中修改网络设置"),
elem_id="netsetting-warning")
default_btn = gr.Button(i18n("🔙 恢复默认网络设置"))
# 网络代理
proxyTxt = gr.Textbox(
show_label=True,
placeholder=i18n("未设置代理..."),
label=i18n("代理地址"),
value=http_proxy,
lines=1,
interactive=False,
# container=False,
elem_classes="view-only-textbox no-container",
)
# changeProxyBtn = gr.Button(i18n("🔄 设置代理地址"))
# 优先展示自定义的api_host
apihostTxt = gr.Textbox(
show_label=True,
placeholder="api.openai.com",
label="OpenAI API-Host",
value=api_host or API_HOST,
lines=1,
interactive=False,
# container=False,
elem_classes="view-only-textbox no-container",
)
with gr.Tab(label=i18n("关于"), elem_id="about-tab"):
gr.Markdown("# " + i18n("ChatGPT WebUI"))
gr.Markdown(CHUANHU_DESCRIPTION, elem_id="description")
with gr.Box(elem_id="web-config", visible=False):
gr.HTML(get_html('web_config.html').format(
enableCheckUpdate_config=False,
hideHistoryWhenNotLoggedIn_config=hide_history_when_not_logged_in,
forView_i18n=i18n("仅供查看"),
deleteConfirm_i18n_pref=i18n("你真的要删除 "),
deleteConfirm_i18n_suff=i18n(" 吗?"),
usingLatest_i18n=i18n("您使用的就是最新版!"),
updatingMsg_i18n=i18n("正在尝试更新..."),
updateSuccess_i18n=i18n("更新成功,请重启本程序"),
updateFailure_i18n=i18n(
"更新失败,请尝试[手动更新](https://github.com/shibing624/chatgpt-webui/"),
regenerate_i18n=i18n("重新生成"),
deleteRound_i18n=i18n("删除这轮问答"),
renameChat_i18n=i18n("重命名该对话"),
validFileName_i18n=i18n("请输入有效的文件名,不要包含以下特殊字符:"),
clearFileHistoryMsg_i18n=i18n("⚠️请先删除知识库中的历史文件,再尝试上传!"),
dropUploadMsg_i18n=i18n("释放文件以上传"),
))
with gr.Box(elem_id="fake-gradio-components", visible=False):
changeSingleSessionBtn = gr.Button(
visible=False, elem_classes="invisible-btn", elem_id="change-single-session-btn")
historySelectBtn = gr.Button(
visible=False, elem_classes="invisible-btn", elem_id="history-select-btn") # Not used
def create_greeting(request: gr.Request):
if hasattr(request, "username") and request.username:
logger.info(f"Get User Name: {request.username}")
user_info, user_name = gr.Markdown.update(
value=f"User: {request.username}"), request.username
else:
user_info, user_name = gr.Markdown.update(
value=f"", visible=False), ""
current_model = get_model(
model_name=MODELS[DEFAULT_MODEL], access_key=my_api_key, user_name=user_name)[0]
if not hide_history_when_not_logged_in or user_name:
loaded_stuff = current_model.auto_load()
else:
loaded_stuff = [gr.update(), gr.update(), gr.Chatbot.update(label=MODELS[DEFAULT_MODEL]),
current_model.single_turn, current_model.temperature, current_model.top_p,
current_model.n_choices, current_model.stop_sequence, current_model.token_upper_limit,
current_model.max_generation_token, current_model.presence_penalty,
current_model.frequency_penalty, current_model.logit_bias, current_model.user_identifier]
return user_info, user_name, current_model, toggle_like_btn_visibility(
DEFAULT_MODEL), *loaded_stuff, init_history_list(user_name)
demo.load(create_greeting, inputs=None, outputs=[
user_info, user_name, current_model, like_dislike_area, saveFileName, systemPromptTxt, chatbot,
single_turn_checkbox, temperature_slider, top_p_slider, n_choices_slider, stop_sequence_txt,
max_context_length_slider, max_generation_slider, presence_penalty_slider, frequency_penalty_slider,
logit_bias_txt, user_identifier_txt, historySelectList], api_name="load")
chatgpt_predict_args = dict(
fn=predict,
inputs=[
current_model,
user_question,
chatbot,
use_streaming_checkbox,
use_websearch_checkbox,
index_files,
language_select_dropdown,
],
outputs=[chatbot, status_display],
show_progress=True,
)
start_outputing_args = dict(
fn=start_outputing,
inputs=[],
outputs=[submitBtn, cancelBtn],
show_progress=True,
)
end_outputing_args = dict(
fn=end_outputing, inputs=[], outputs=[submitBtn, cancelBtn]
)
reset_textbox_args = dict(
fn=reset_textbox, inputs=[], outputs=[user_input]
)
transfer_input_args = dict(
fn=transfer_input, inputs=[user_input], outputs=[
user_question, user_input, submitBtn, cancelBtn], show_progress=True
)
get_usage_args = dict(
fn=billing_info, inputs=[current_model], outputs=[
usageTxt], show_progress=False
)
load_history_from_file_args = dict(
fn=load_chat_history,
inputs=[current_model, historySelectList],
outputs=[saveFileName, systemPromptTxt, chatbot, single_turn_checkbox, temperature_slider, top_p_slider,
n_choices_slider, stop_sequence_txt, max_context_length_slider, max_generation_slider,
presence_penalty_slider, frequency_penalty_slider, logit_bias_txt, user_identifier_txt],
)
refresh_history_args = dict(
fn=get_history_list, inputs=[user_name], outputs=[historySelectList]
)
auto_name_chat_history_args = dict(
fn=auto_name_chat_history,
inputs=[current_model, name_chat_method, user_question, chatbot, single_turn_checkbox],
outputs=[historySelectList],
show_progress=False,
)
# Chatbot
cancelBtn.click(interrupt, [current_model], [])
user_input.submit(
**transfer_input_args).then(
**chatgpt_predict_args).then(
**end_outputing_args).then(
**auto_name_chat_history_args)
user_input.submit(**get_usage_args)
submitBtn.click(**transfer_input_args).then(
**chatgpt_predict_args, api_name="predict").then(
**end_outputing_args).then(
**auto_name_chat_history_args)
submitBtn.click(**get_usage_args)
index_files.upload(handle_file_upload, [current_model, index_files, chatbot, language_select_dropdown], [
index_files, chatbot, status_display])
summarize_btn.click(handle_summarize_index, [
current_model, index_files, chatbot, language_select_dropdown], [chatbot, status_display])
emptyBtn.click( | reset, | 7 | 2023-12-27 12:14:26+00:00 | 12k |
camenduru/AnyDoor-online-hf | ldm/models/diffusion/ddpm.py | [
{
"identifier": "log_txt_as_img",
"path": "ldm/util.py",
"snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n ... | import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import itertools
import torch.nn.functional as F
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager, nullcontext
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from omegaconf import ListConfig
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler | 9,885 | """
wild mixture of
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
https://github.com/CompVis/taming-transformers
-- merci
"""
# Maps a conditioning-mode name to the keyword argument under which the
# conditioning inputs are passed to the wrapped diffusion model.
# NOTE(review): presumably 'adm' is the class-conditional ('y') pathway —
# confirm against DiffusionWrapper's forward signature.
__conditioning_keys__ = {'concat': 'c_concat',
                         'crossattn': 'c_crossattn',
                         'adm': 'y'}
def disabled_train(self, mode=True):
    """No-op replacement for ``nn.Module.train`` so that a frozen module can
    no longer be toggled between train and eval mode; always returns ``self``
    unchanged, ignoring ``mode``."""
    return self
def uniform_on_device(r1, r2, shape, device):
    """Sample a tensor of the given shape uniformly between ``r2`` and ``r1``,
    allocated directly on ``device`` (single ``torch.rand`` draw)."""
    unit_sample = torch.rand(*shape, device=device)
    return r2 + (r1 - r2) * unit_sample
class DDPM(pl.LightningModule):
# classic DDPM with Gaussian diffusion, in image space
def __init__(self,
unet_config,
timesteps=1000,
beta_schedule="linear",
loss_type="l2",
ckpt_path=None,
ignore_keys=[],
load_only_unet=False,
monitor="val/loss",
use_ema=True,
first_stage_key="image",
image_size=256,
channels=3,
log_every_t=100,
clip_denoised=True,
linear_start=1e-4,
linear_end=2e-2,
cosine_s=8e-3,
given_betas=None,
original_elbo_weight=0.,
v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
l_simple_weight=1.,
conditioning_key=None,
parameterization="eps", # all assuming fixed variance schedules
scheduler_config=None,
use_positional_encodings=False,
learn_logvar=False,
logvar_init=0.,
make_it_fit=False,
ucg_training=None,
reset_ema=False,
reset_num_ema_updates=False,
):
super().__init__()
assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"'
self.parameterization = parameterization
print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
self.cond_stage_model = None
self.clip_denoised = clip_denoised
self.log_every_t = log_every_t
self.first_stage_key = first_stage_key
self.image_size = image_size # try conv?
self.channels = channels
self.use_positional_encodings = use_positional_encodings
self.model = DiffusionWrapper(unet_config, conditioning_key)
count_params(self.model, verbose=True)
self.use_ema = use_ema
if self.use_ema:
self.model_ema = LitEma(self.model)
print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
self.use_scheduler = scheduler_config is not None
if self.use_scheduler:
self.scheduler_config = scheduler_config
self.v_posterior = v_posterior
self.original_elbo_weight = original_elbo_weight
self.l_simple_weight = l_simple_weight
if monitor is not None:
self.monitor = monitor
self.make_it_fit = make_it_fit
| """
wild mixture of
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
https://github.com/CompVis/taming-transformers
-- merci
"""
# Maps a conditioning-mode name to the keyword argument under which the
# conditioning inputs are passed to the wrapped diffusion model.
# NOTE(review): presumably 'adm' is the class-conditional ('y') pathway —
# confirm against DiffusionWrapper's forward signature.
__conditioning_keys__ = {'concat': 'c_concat',
                         'crossattn': 'c_crossattn',
                         'adm': 'y'}
def disabled_train(self, mode=True):
    """No-op replacement for ``nn.Module.train`` so that a frozen module can
    no longer be toggled between train and eval mode; always returns ``self``
    unchanged, ignoring ``mode``."""
    return self
def uniform_on_device(r1, r2, shape, device):
    """Sample a tensor of the given shape uniformly between ``r2`` and ``r1``,
    allocated directly on ``device`` (single ``torch.rand`` draw)."""
    unit_sample = torch.rand(*shape, device=device)
    return r2 + (r1 - r2) * unit_sample
class DDPM(pl.LightningModule):
# classic DDPM with Gaussian diffusion, in image space
def __init__(self,
unet_config,
timesteps=1000,
beta_schedule="linear",
loss_type="l2",
ckpt_path=None,
ignore_keys=[],
load_only_unet=False,
monitor="val/loss",
use_ema=True,
first_stage_key="image",
image_size=256,
channels=3,
log_every_t=100,
clip_denoised=True,
linear_start=1e-4,
linear_end=2e-2,
cosine_s=8e-3,
given_betas=None,
original_elbo_weight=0.,
v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
l_simple_weight=1.,
conditioning_key=None,
parameterization="eps", # all assuming fixed variance schedules
scheduler_config=None,
use_positional_encodings=False,
learn_logvar=False,
logvar_init=0.,
make_it_fit=False,
ucg_training=None,
reset_ema=False,
reset_num_ema_updates=False,
):
super().__init__()
assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"'
self.parameterization = parameterization
print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
self.cond_stage_model = None
self.clip_denoised = clip_denoised
self.log_every_t = log_every_t
self.first_stage_key = first_stage_key
self.image_size = image_size # try conv?
self.channels = channels
self.use_positional_encodings = use_positional_encodings
self.model = DiffusionWrapper(unet_config, conditioning_key)
count_params(self.model, verbose=True)
self.use_ema = use_ema
if self.use_ema:
self.model_ema = LitEma(self.model)
print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
self.use_scheduler = scheduler_config is not None
if self.use_scheduler:
self.scheduler_config = scheduler_config
self.v_posterior = v_posterior
self.original_elbo_weight = original_elbo_weight
self.l_simple_weight = l_simple_weight
if monitor is not None:
self.monitor = monitor
self.make_it_fit = make_it_fit | if reset_ema: assert exists(ckpt_path) | 1 | 2023-12-25 04:48:34+00:00 | 12k |
pangxincheng/TaskManager | task_manager/main.py | [
{
"identifier": "CoreManager",
"path": "task_manager/manager/core.py",
"snippet": "class CoreManager(mp.Process):\n\n def __init__(\n self,\n core_manager_addr: str,\n gpu_manager_addr: str=\"ipc://gpu_manager\",\n task_manager_addr: str=\"ipc://task_manager\",\n lo... | import os
import sys
import rich
import time
import argparse
import multiprocessing as mp
import task_manager.utils.common_utils as common_utils
from task_manager.manager.core import CoreManager
from task_manager.controller.cli_controller import CLIController | 8,115 |
# The managers below run as separate processes and this project only supports
# the 'spawn' start method (see the assert message): set it if nothing has
# chosen a method yet, otherwise fail fast when another module already picked
# a different one.
if mp.get_start_method(allow_none=True) is None:
    mp.set_start_method("spawn")
else:
    assert mp.get_start_method() == "spawn", "Only support spawn start method"
def parse_args():
    """Parse the CLI options for the task-manager entry point.

    Socket addresses default to per-run ipc paths derived from an md5 of the
    current time so that concurrent runs do not collide. Side effects: creates
    the log directory and truncates ``sys.argv`` to the program name so later
    argument parsers see a clean argv.
    """
    session_id = common_utils.md5(str(time.time()))
    parser = argparse.ArgumentParser()
    parser.add_argument("--log_dir", default="logs", help="Log dir")
    parser.add_argument("--log_level", default="INFO", help="Log level")
    parser.add_argument("--web_controller", action="store_true", help="Whether start web gui to watch GPU usage&Tasks")
    # The three manager sockets share one naming scheme; declare them in a loop.
    for name, label in (("core_manager", "Core"), ("gpu_manager", "GPU"), ("task_manager", "Task")):
        parser.add_argument(
            f"--{name}_addr",
            type=str,
            default=f"ipc:///tmp/{name}-{session_id}.sock",
            help=f"Address to run {label} manager on"
        )
    args = parser.parse_args()
    os.makedirs(args.log_dir, exist_ok=True)
    sys.argv = sys.argv[:1]  # strip consumed options for any downstream parser
    return args
def start_core_manager(args):
|
# The managers below run as separate processes and this project only supports
# the 'spawn' start method (see the assert message): set it if nothing has
# chosen a method yet, otherwise fail fast when another module already picked
# a different one.
if mp.get_start_method(allow_none=True) is None:
    mp.set_start_method("spawn")
else:
    assert mp.get_start_method() == "spawn", "Only support spawn start method"
def parse_args():
    """Parse the CLI options for the task-manager entry point.

    Socket addresses default to per-run ipc paths derived from an md5 of the
    current time so that concurrent runs do not collide. Side effects: creates
    the log directory and truncates ``sys.argv`` to the program name so later
    argument parsers see a clean argv.
    """
    session_id = common_utils.md5(str(time.time()))
    parser = argparse.ArgumentParser()
    parser.add_argument("--log_dir", default="logs", help="Log dir")
    parser.add_argument("--log_level", default="INFO", help="Log level")
    parser.add_argument("--web_controller", action="store_true", help="Whether start web gui to watch GPU usage&Tasks")
    # The three manager sockets share one naming scheme; declare them in a loop.
    for name, label in (("core_manager", "Core"), ("gpu_manager", "GPU"), ("task_manager", "Task")):
        parser.add_argument(
            f"--{name}_addr",
            type=str,
            default=f"ipc:///tmp/{name}-{session_id}.sock",
            help=f"Address to run {label} manager on"
        )
    args = parser.parse_args()
    os.makedirs(args.log_dir, exist_ok=True)
    sys.argv = sys.argv[:1]  # strip consumed options for any downstream parser
    return args
def start_core_manager(args): | core_manager = CoreManager( | 0 | 2023-12-30 11:47:06+00:00 | 12k |
Shaokang-Agent/S2L | marlgrid/base.py | [
{
"identifier": "WorldObj",
"path": "marlgrid/objects.py",
"snippet": "class WorldObj(metaclass=RegisteredObjectType):\n def __init__(self, color=\"worst\", state=0):\n self.color = color\n self.state = state\n self.contains = None\n\n self.agents = [] # Some objects can h... | import gym
import numpy as np
import gym_minigrid
import math
import warnings
import pdb; pdb.set_trace()
from enum import IntEnum
from .objects import WorldObj, Wall, Goal, Lava, GridAgent, BonusTile, BulkObj, COLORS
from .agents import GridAgentInterface
from .rendering import SimpleImageViewer
from gym_minigrid.rendering import fill_coords, point_in_rect, downsample, highlight_img | 8,106 | if not agent.active:
# below, not sure orientation is correct but as of 6/27/2020 that doesn't matter because
# agent views are usually square and this grid won't be used for anything.
grid = MultiGrid((agent.view_size, agent.view_size), orientation=agent.dir+1)
vis_mask = np.zeros((agent.view_size, agent.view_size), dtype=np.bool)
return grid, vis_mask
topX, topY, botX, botY = agent.get_view_exts()
grid = self.grid.slice(
topX, topY, agent.view_size, agent.view_size, rot_k=agent.dir + 1
)
# Process occluders and visibility
# Note that this incurs some slight performance cost
vis_mask = agent.process_vis(grid.opacity)
# Warning about the rest of the function:
# Allows masking away objects that the agent isn't supposed to see.
# But breaks consistency between the states of the grid objects in the parial views
# and the grid objects overall.
if len(getattr(agent, 'hide_item_types', []))>0:
for i in range(grid.width):
for j in range(grid.height):
item = grid.get(i,j)
if (item is not None) and (item is not agent) and (item.type in agent.hide_item_types):
if len(item.agents) > 0:
grid.set(i,j,item.agents[0])
else:
grid.set(i,j,None)
return grid, vis_mask
    def gen_agent_obs(self, agent):
        """
        Generate the agent's view (partially observable, low-resolution encoding)

        Returns a raw RGB view image when ``agent.observation_style == 'image'``,
        otherwise a dict observation containing the rendered 'pov' image plus
        optional scalar channels selected by the agent's ``observe_*`` flags.
        """
        grid, vis_mask = self.gen_obs_grid(agent)
        grid_image = grid.render(tile_size=agent.view_tile_size, visible_mask=vis_mask, top_agent=agent)
        if agent.observation_style=='image':
            return grid_image
        else:
            # Dict-style observation always carries the rendered view.
            ret = {'pov': grid_image}
            if agent.observe_rewards:
                # step_reward is assigned during step(); default to 0 before the first step.
                ret['reward'] = getattr(agent, 'step_reward', 0)
            if agent.observe_position:
                agent_pos = agent.pos if agent.pos is not None else (0,0)
                # Position is normalized by the grid dimensions.
                # NOTE(review): np.float was removed in NumPy 1.24+; this needs float/np.float64.
                ret['position'] = np.array(agent_pos)/np.array([self.width, self.height], dtype=np.float)
            if agent.observe_orientation:
                # Orientation is the raw facing direction (0-3); 0 when unplaced.
                agent_dir = agent.dir if agent.dir is not None else 0
                ret['orientation'] = agent_dir
            return ret
def gen_obs(self):
return [self.gen_agent_obs(agent) for agent in self.agents]
def __str__(self):
return self.grid.__str__()
    def check_agent_position_integrity(self, title=''):
        '''
        This function checks whether each agent is present in the grid in exactly one place.
        This is particularly helpful for validating the world state when ghost_mode=False and
        agents can stack, since the logic for moving them around gets a bit messy.
        Prints a message and drops into pdb if there's an inconsistency.
        '''
        # agent_locs[k] collects every grid location where agent k appears,
        # tagged 'top' when the agent is the cell object itself and 'stacked'
        # when it sits inside another object's .agents list.
        agent_locs = [[] for _ in range(len(self.agents))]
        for i in range(self.grid.width):
            for j in range(self.grid.height):
                x = self.grid.get(i,j)
                for k,agent in enumerate(self.agents):
                    if x==agent:
                        agent_locs[k].append(('top', (i,j)))
                    if hasattr(x, 'agents') and agent in x.agents:
                        agent_locs[k].append(('stacked', (i,j)))
        if not all([len(x)==1 for x in agent_locs]):
            # Every agent must appear exactly once; anything else is a bug.
            # NOTE(review): the docstring promises a pdb drop, but this branch only prints.
            print(f"{title} > Failed integrity test!")
            for a, al in zip(self.agents, agent_locs):
                print(" > ", a.color,'-', al)
def step(self, actions):
# Spawn agents if it's time.
for agent in self.agents:
if not agent.active and not agent.done and self.step_count >= agent.spawn_delay:
self.place_obj(agent, **self.agent_spawn_kwargs)
agent.activate()
assert len(actions) == len(self.agents)
step_rewards = np.zeros((len(self.agents,)), dtype=np.float)
self.step_count += 1
iter_agents = list(enumerate(zip(self.agents, actions)))
iter_order = np.arange(len(iter_agents))
self.np_random.shuffle(iter_order)
for shuffled_ix in iter_order:
agent_no, (agent, action) = iter_agents[shuffled_ix]
agent.step_reward = 0
if agent.active:
cur_pos = agent.pos[:]
cur_cell = self.grid.get(*cur_pos)
fwd_pos = agent.front_pos[:]
fwd_cell = self.grid.get(*fwd_pos)
agent_moved = False
# Rotate left
if action == agent.actions.left:
agent.dir = (agent.dir - 1) % 4
# Rotate right
elif action == agent.actions.right:
agent.dir = (agent.dir + 1) % 4
# Move forward
elif action == agent.actions.forward:
# Under the follow conditions, the agent can move forward.
can_move = fwd_cell is None or fwd_cell.can_overlap()
# Multi-agent gridworld.
# Based on MiniGrid: https://github.com/maximecb/gym-minigrid.
# Default edge length (in pixels) of one rendered grid tile.
TILE_PIXELS = 32
class ObjectRegistry:
    '''
    This class contains dicts that map objects to numeric keys and vice versa.
    Used so that grid worlds can represent objects using numerical arrays rather
    than lists of lists of generic objects.
    '''
    def __init__(self, objs=(), max_num_objects=1000):
        """Create a registry, optionally pre-registering the objects in ``objs``.

        ``objs`` accepts any iterable; the previous mutable-list default was
        replaced with a tuple (it was only iterated, so behavior is unchanged).
        """
        self.key_to_obj_map = {}
        self.obj_to_key_map = {}
        self.max_num_objects = max_num_objects
        for obj in objs:
            self.add_object(obj)

    def get_next_key(self):
        """Return the smallest unused integer key.

        Raises:
            ValueError: if all ``max_num_objects`` keys are already taken.
        """
        for k in range(self.max_num_objects):
            if k not in self.key_to_obj_map:
                break
        else:
            raise ValueError("Object registry full.")
        return k

    def __len__(self):
        # Bug fix: this previously read ``self.id_to_obj_map``, an attribute
        # that never exists, so ``len(registry)`` always raised AttributeError.
        return len(self.key_to_obj_map)

    def add_object(self, obj):
        """Register ``obj`` under a fresh key and return that key."""
        new_key = self.get_next_key()
        self.key_to_obj_map[new_key] = obj
        self.obj_to_key_map[obj] = new_key
        return new_key

    def contains_object(self, obj):
        """Return True if ``obj`` has already been registered."""
        return obj in self.obj_to_key_map

    def contains_key(self, key):
        """Return True if ``key`` maps to a registered object."""
        return key in self.key_to_obj_map

    def get_key(self, obj):
        """Return the key for ``obj``, registering it first if unseen."""
        if obj in self.obj_to_key_map:
            return self.obj_to_key_map[obj]
        else:
            return self.add_object(obj)

    # 5/4/2020 This gets called A LOT. Replaced calls to this function with direct dict gets
    # in an attempt to speed things up. Probably didn't make a big difference.
    def obj_of_key(self, key):
        """Return the object registered under ``key`` (KeyError if absent)."""
        return self.key_to_obj_map[key]
def rotate_grid(grid, rot_k):
    """Rotate a 2D array by ``rot_k`` quarter turns (image convention).

    Equivalent to ``np.rot90(grid, -rot_k)`` but cheaper: each case is a
    single slice/transpose view rather than the generic rot90 machinery.
    Returns ``grid`` itself (no copy) when the rotation is a no-op.
    """
    rot_k %= 4
    if rot_k == 0:
        return grid
    if rot_k == 2:
        return grid[::-1, ::-1]
    flipped = grid[::-1, :] if rot_k == 1 else grid[:, ::-1]
    return np.moveaxis(flipped, 0, 1)
class MultiGrid:
    """A 2D grid of world objects backed by a uint8 array of registry keys.

    Cells are stored as integer keys into ``obj_reg`` (an ObjectRegistry),
    so slicing/rotating the world is cheap array manipulation; ``get``/``set``
    translate between keys and the actual objects.
    NOTE(review): keys are stored as uint8 while the default registry allows
    up to 1000 objects — registries with >255 distinct objects would overflow;
    confirm the upstream bound.
    """
    # Class-level render cache shared by ALL MultiGrid instances:
    # maps a tile key (size + object encoding) to a rendered tile image.
    tile_cache = {}
    def __init__(self, shape, obj_reg=None, orientation=0):
        """Build a grid from a (width, height) tuple (zero-filled, i.e. all
        cells map to registry key 0) or wrap an existing key array."""
        self.orientation = orientation
        if isinstance(shape, tuple):
            self.width, self.height = shape
            self.grid = np.zeros((self.width, self.height), dtype=np.uint8) # w,h
        elif isinstance(shape, np.ndarray):
            self.width, self.height = shape.shape
            self.grid = shape
        else:
            raise ValueError("Must create grid from shape tuple or array.")
        if self.width < 3 or self.height < 3:
            raise ValueError("Grid needs width, height >= 3")
        # Default registry reserves key 0 for None (empty cell).
        self.obj_reg = ObjectRegistry(objs=[None]) if obj_reg is None else obj_reg
    @property
    def opacity(self):
        # Boolean array, True where the cell's object blocks line of sight
        # (objects without a see_behind method are treated as transparent).
        transparent_fun = np.vectorize(lambda k: (self.obj_reg.key_to_obj_map[k].see_behind() if hasattr(self.obj_reg.key_to_obj_map[k], 'see_behind') else True))
        return ~transparent_fun(self.grid)
    def __getitem__(self, *args, **kwargs):
        # Indexing returns a new MultiGrid wrapping the sliced key array,
        # sharing the same object registry.
        return self.__class__(
            np.ndarray.__getitem__(self.grid, *args, **kwargs),
            obj_reg=self.obj_reg,
            orientation=self.orientation,
        )
    def rotate_left(self, k=1):
        """Return a copy of the grid rotated left by ``k`` quarter turns."""
        return self.__class__(
            rotate_grid(self.grid, rot_k=k), # np.rot90(self.grid, k=k),
            obj_reg=self.obj_reg,
            orientation=(self.orientation - k) % 4,
        )
    def slice(self, topX, topY, width, height, rot_k=0):
        """
        Get a subset of the grid

        Out-of-bounds regions of the requested window are left as key 0
        (empty); the result is then rotated by ``rot_k`` quarter turns.
        """
        sub_grid = self.__class__(
            (width, height),
            obj_reg=self.obj_reg,
            orientation=(self.orientation - rot_k) % 4,
        )
        # Clip the requested window to the grid bounds ...
        x_min = max(0, topX)
        x_max = min(topX + width, self.width)
        y_min = max(0, topY)
        y_max = min(topY + height, self.height)
        # ... and paste the in-bounds part at the matching offset.
        x_offset = x_min - topX
        y_offset = y_min - topY
        sub_grid.grid[
            x_offset : x_max - x_min + x_offset, y_offset : y_max - y_min + y_offset
        ] = self.grid[x_min:x_max, y_min:y_max]
        sub_grid.grid = rotate_grid(sub_grid.grid, rot_k)
        sub_grid.width, sub_grid.height = sub_grid.grid.shape
        return sub_grid
    def set(self, i, j, obj):
        """Store ``obj`` at cell (i, j), registering it if unseen."""
        assert i >= 0 and i < self.width
        assert j >= 0 and j < self.height
        self.grid[i, j] = self.obj_reg.get_key(obj)
    def get(self, i, j):
        """Return the object at cell (i, j) (None for empty cells)."""
        assert i >= 0 and i < self.width
        assert j >= 0 and j < self.height
        return self.obj_reg.key_to_obj_map[self.grid[i, j]]
    def horz_wall(self, x, y, length=None, obj_type=Wall):
        # Fill a horizontal run with fresh obj_type instances; length defaults
        # to "until the right edge".
        if length is None:
            length = self.width - x
        for i in range(0, length):
            self.set(x + i, y, obj_type())
    def vert_wall(self, x, y, length=None, obj_type=Wall):
        # Vertical counterpart of horz_wall.
        if length is None:
            length = self.height - y
        for j in range(0, length):
            self.set(x, y + j, obj_type())
    def wall_rect(self, x, y, w, h, obj_type=Wall):
        """Draw the four edges of a w-by-h rectangle starting at (x, y)."""
        self.horz_wall(x, y, w, obj_type=obj_type)
        self.horz_wall(x, y + h - 1, w, obj_type=obj_type)
        self.vert_wall(x, y, h, obj_type=obj_type)
        self.vert_wall(x + w - 1, y, h, obj_type=obj_type)
    def __str__(self):
        # ASCII rendering: each cell is two characters wide, framed by '*'.
        render = (
            lambda x: "  "
            if x is None or not hasattr(x, "str_render")
            else x.str_render(dir=self.orientation)
        )
        hstars = "*" * (2 * self.width + 2)
        return (
            hstars
            + "\n"
            + "\n".join(
                "*" + "".join(render(self.get(i, j)) for i in range(self.width)) + "*"
                for j in range(self.height)
            )
            + "\n"
            + hstars
        )
    def encode(self, vis_mask=None):
        """
        Produce a compact numpy encoding of the grid

        Each visible cell becomes the object's 3-value ``encode()`` triple;
        masked or empty cells are all zeros.
        """
        if vis_mask is None:
            vis_mask = np.ones((self.width, self.height), dtype=bool)
        array = np.zeros((self.width, self.height, 3), dtype="uint8")
        for i in range(self.width):
            for j in range(self.height):
                if vis_mask[i, j]:
                    v = self.get(i, j)
                    if v is None:
                        array[i, j, :] = 0
                    else:
                        array[i, j, :] = v.encode()
        return array
    @classmethod
    def decode(cls, array):
        # Not implemented. NOTE(review): everything after the raise is
        # unreachable dead code and references undefined names (vis_mask, i, j).
        raise NotImplementedError
        width, height, channels = array.shape
        assert channels == 3
        vis_mask[i, j] = np.ones(shape=(width, height), dtype=np.bool)
        grid = cls((width, height))
    @classmethod
    def cache_render_fun(cls, key, f, *args, **kwargs):
        # Memoize f(*args, **kwargs) under ``key`` in the shared tile cache;
        # a copy is returned so callers can mutate the image safely.
        if key not in cls.tile_cache:
            cls.tile_cache[key] = f(*args, **kwargs)
        return np.copy(cls.tile_cache[key])
    @classmethod
    def cache_render_obj(cls, obj, tile_size, subdivs):
        # Render (and cache) the tile image for ``obj``; the cache key includes
        # the object's encode() triple so visually distinct states get their
        # own entries. render_post (if present) is applied AFTER caching, so
        # its effect is recomputed on every call.
        if obj is None:
            return cls.cache_render_fun((tile_size, None), cls.empty_tile, tile_size, subdivs)
        else:
            img = cls.cache_render_fun(
                (tile_size, obj.__class__.__name__, *obj.encode()),
                cls.render_object, obj, tile_size, subdivs
            )
            if hasattr(obj, 'render_post'):
                return obj.render_post(img)
            else:
                return img
    @classmethod
    def empty_tile(cls, tile_size, subdivs):
        # An "empty" cell: faint top/right border (alpha shrinks for tiny tiles),
        # black interior.
        alpha = max(0, min(20, tile_size-10))
        img = np.full((tile_size, tile_size, 3), alpha, dtype=np.uint8)
        img[1:,:-1] = 0
        return img
    @classmethod
    def render_object(cls, obj, tile_size, subdivs):
        # Render at subdivs-times resolution, then downsample for antialiasing.
        img = np.zeros((tile_size*subdivs,tile_size*subdivs, 3), dtype=np.uint8)
        obj.render(img)
        # if 'Agent' not in obj.type and len(obj.agents) > 0:
        #     obj.agents[0].render(img)
        return downsample(img, subdivs).astype(np.uint8)
    @classmethod
    def blend_tiles(cls, img1, img2):
        '''
        This function renders one "tile" on top of another. Kinda janky, works surprisingly well.
        Assumes img2 is a downscaled monochromatic with a black (0,0,0) background.
        '''
        # Use img2's per-pixel brightness as an alpha channel for compositing.
        alpha = img2.sum(2, keepdims=True)
        max_alpha = alpha.max()
        if max_alpha == 0:
            return img1
        return (
            ((img1 * (max_alpha-alpha))+(img2*alpha)
            )/max_alpha
        ).astype(img1.dtype)
    @classmethod
    def render_tile(cls, obj, tile_size=TILE_PIXELS, subdivs=3, top_agent=None):
        # NOTE(review): the subdivs parameter is immediately overridden here,
        # so callers cannot actually change the supersampling factor.
        subdivs = 3
        if obj is None:
            img = cls.cache_render_obj(obj, tile_size, subdivs)
        else:
            if ('Agent' in obj.type) and (top_agent in obj.agents):
                # If the tile is a stack of agents that includes the top agent, then just render the top agent.
                img = cls.cache_render_obj(top_agent, tile_size, subdivs)
            else:
                # Otherwise, render (+ downsize) the item in the tile.
                img = cls.cache_render_obj(obj, tile_size, subdivs)
                # If the base obj isn't an agent but has agents on top, render an agent and blend it in.
                if len(obj.agents)>0 and 'Agent' not in obj.type:
                    if top_agent in obj.agents:
                        img_agent = cls.cache_render_obj(top_agent, tile_size, subdivs)
                    else:
                        img_agent = cls.cache_render_obj(obj.agents[0], tile_size, subdivs)
                    img = cls.blend_tiles(img, img_agent)
        # Render the tile border if any of the corners are black.
        if (img[([0,0,-1,-1],[0,-1,0,-1])]==0).all(axis=-1).any():
            img = img + cls.cache_render_fun((tile_size, None), cls.empty_tile, tile_size, subdivs)
        return img
    def render(self, tile_size, highlight_mask=None, visible_mask=None, top_agent=None):
        """Render the whole grid to an RGB uint8 image.

        Cells masked out by ``visible_mask`` keep the 'shadow' background;
        ``highlight_mask`` brightens the marked tiles.
        """
        width_px = self.width * tile_size
        height_px = self.height * tile_size
        img = np.zeros(shape=(height_px, width_px), dtype=np.uint8)[...,None]+COLORS['shadow']
        for j in range(0, self.height):
            for i in range(0, self.width):
                if visible_mask is not None and not visible_mask[i,j]:
                    continue
                obj = self.get(i, j)
                tile_img = MultiGrid.render_tile(
                    obj,
                    tile_size=tile_size,
                    top_agent=top_agent
                )
                ymin = j * tile_size
                ymax = (j + 1) * tile_size
                xmin = i * tile_size
                xmax = (i + 1) * tile_size
                img[ymin:ymax, xmin:xmax, :] = rotate_grid(tile_img, self.orientation)
        if highlight_mask is not None:
            # Tile the mask up to pixel resolution, then brighten highlighted
            # pixels in fixed-point: (img*8 + hm*2) >> 3, clipped to uint8.
            hm = np.kron(highlight_mask.T, np.full((tile_size, tile_size), 255, dtype=np.uint16)
            )[...,None] # arcane magic.
            img = np.right_shift(img.astype(np.uint16)*8+hm*2, 3).clip(0,255).astype(np.uint8)
        return img
class MultiGridEnv(gym.Env):
def __init__(
self,
agents = [],
grid_size=None,
width=None,
height=None,
max_steps=100,
reward_decay=True,
seed=1337,
respawn=False,
ghost_mode=True,
agent_spawn_kwargs = {}
):
if grid_size is not None:
assert width == None and height == None
width, height = grid_size, grid_size
self.respawn = respawn
self.window = None
self.width = width
self.height = height
self.max_steps = max_steps
self.reward_decay = reward_decay
self.seed(seed=seed)
self.agent_spawn_kwargs = agent_spawn_kwargs
self.ghost_mode = ghost_mode
self.agents = []
for agent in agents:
self.add_agent(agent)
self.reset()
def seed(self, seed=1337):
# Seed the random number generator
self.np_random, _ = gym.utils.seeding.np_random(seed)
return [seed]
@property
def action_space(self):
return gym.spaces.Tuple(
[agent.action_space for agent in self.agents]
)
@property
def observation_space(self):
return gym.spaces.Tuple(
[agent.observation_space for agent in self.agents]
)
@property
def num_agents(self):
return len(self.agents)
def add_agent(self, agent_interface):
if isinstance(agent_interface, dict):
self.agents.append(GridAgentInterface(**agent_interface))
elif isinstance(agent_interface, GridAgentInterface):
self.agents.append(agent_interface)
else:
raise ValueError(
"To add an agent to a marlgrid environment, call add_agent with either a GridAgentInterface object "
" or a dictionary that can be used to initialize one.")
def reset(self, **kwargs):
for agent in self.agents:
agent.agents = []
agent.reset(new_episode=True)
self._gen_grid(self.width, self.height)
for agent in self.agents:
if agent.spawn_delay == 0:
self.place_obj(agent, **self.agent_spawn_kwargs)
agent.activate()
self.step_count = 0
obs = self.gen_obs()
return obs
def gen_obs_grid(self, agent):
# If the agent is inactive, return an empty grid and a visibility mask that hides everything.
if not agent.active:
# below, not sure orientation is correct but as of 6/27/2020 that doesn't matter because
# agent views are usually square and this grid won't be used for anything.
grid = MultiGrid((agent.view_size, agent.view_size), orientation=agent.dir+1)
vis_mask = np.zeros((agent.view_size, agent.view_size), dtype=np.bool)
return grid, vis_mask
topX, topY, botX, botY = agent.get_view_exts()
grid = self.grid.slice(
topX, topY, agent.view_size, agent.view_size, rot_k=agent.dir + 1
)
# Process occluders and visibility
# Note that this incurs some slight performance cost
vis_mask = agent.process_vis(grid.opacity)
# Warning about the rest of the function:
# Allows masking away objects that the agent isn't supposed to see.
# But breaks consistency between the states of the grid objects in the parial views
# and the grid objects overall.
if len(getattr(agent, 'hide_item_types', []))>0:
for i in range(grid.width):
for j in range(grid.height):
item = grid.get(i,j)
if (item is not None) and (item is not agent) and (item.type in agent.hide_item_types):
if len(item.agents) > 0:
grid.set(i,j,item.agents[0])
else:
grid.set(i,j,None)
return grid, vis_mask
def gen_agent_obs(self, agent):
"""
Generate the agent's view (partially observable, low-resolution encoding)
"""
grid, vis_mask = self.gen_obs_grid(agent)
grid_image = grid.render(tile_size=agent.view_tile_size, visible_mask=vis_mask, top_agent=agent)
if agent.observation_style=='image':
return grid_image
else:
ret = {'pov': grid_image}
if agent.observe_rewards:
ret['reward'] = getattr(agent, 'step_reward', 0)
if agent.observe_position:
agent_pos = agent.pos if agent.pos is not None else (0,0)
ret['position'] = np.array(agent_pos)/np.array([self.width, self.height], dtype=np.float)
if agent.observe_orientation:
agent_dir = agent.dir if agent.dir is not None else 0
ret['orientation'] = agent_dir
return ret
def gen_obs(self):
return [self.gen_agent_obs(agent) for agent in self.agents]
def __str__(self):
return self.grid.__str__()
def check_agent_position_integrity(self, title=''):
'''
This function checks whether each agent is present in the grid in exactly one place.
This is particularly helpful for validating the world state when ghost_mode=False and
agents can stack, since the logic for moving them around gets a bit messy.
Prints a message and drops into pdb if there's an inconsistency.
'''
agent_locs = [[] for _ in range(len(self.agents))]
for i in range(self.grid.width):
for j in range(self.grid.height):
x = self.grid.get(i,j)
for k,agent in enumerate(self.agents):
if x==agent:
agent_locs[k].append(('top', (i,j)))
if hasattr(x, 'agents') and agent in x.agents:
agent_locs[k].append(('stacked', (i,j)))
if not all([len(x)==1 for x in agent_locs]):
print(f"{title} > Failed integrity test!")
for a, al in zip(self.agents, agent_locs):
print(" > ", a.color,'-', al)
def step(self, actions):
# Spawn agents if it's time.
for agent in self.agents:
if not agent.active and not agent.done and self.step_count >= agent.spawn_delay:
self.place_obj(agent, **self.agent_spawn_kwargs)
agent.activate()
assert len(actions) == len(self.agents)
step_rewards = np.zeros((len(self.agents,)), dtype=np.float)
self.step_count += 1
iter_agents = list(enumerate(zip(self.agents, actions)))
iter_order = np.arange(len(iter_agents))
self.np_random.shuffle(iter_order)
for shuffled_ix in iter_order:
agent_no, (agent, action) = iter_agents[shuffled_ix]
agent.step_reward = 0
if agent.active:
cur_pos = agent.pos[:]
cur_cell = self.grid.get(*cur_pos)
fwd_pos = agent.front_pos[:]
fwd_cell = self.grid.get(*fwd_pos)
agent_moved = False
# Rotate left
if action == agent.actions.left:
agent.dir = (agent.dir - 1) % 4
# Rotate right
elif action == agent.actions.right:
agent.dir = (agent.dir + 1) % 4
# Move forward
elif action == agent.actions.forward:
# Under the follow conditions, the agent can move forward.
can_move = fwd_cell is None or fwd_cell.can_overlap() | if self.ghost_mode is False and isinstance(fwd_cell, GridAgent): | 4 | 2023-12-24 06:50:38+00:00 | 12k |
smonsays/modular-hyperteacher | tests/data/test_imitation.py | [
{
"identifier": "CompositionalGrid",
"path": "metax/data/envs/grid.py",
"snippet": "class CompositionalGrid(Environment):\n def __init__(\n self,\n grid_size: int,\n num_interactions: int,\n num_mazes: int,\n num_objects: int,\n num_distractors: int,\n ... | import unittest
import jax
import jax.numpy as jnp
from metax.data.envs.grid import CompositionalGrid
from metax.data.imitation import (ImitationMetaDataloader,
create_imitation_metaloader)
from metax.utils import tree_length | 7,470 | """
Copyright (c) Simon Schug
All rights reserved.
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
class ImitationTestCase(unittest.TestCase):
rng = jax.random.PRNGKey(0)
def test_ImitationMetaDataloader(self):
env = CompositionalGrid(
grid_size := 7,
num_interactions := 3,
num_mazes := 2,
num_objects := 5,
num_distractors := 2,
frac_ood := 0.2,
task_support := "random",
seed := 2022,
)
loader = ImitationMetaDataloader(
env,
num_tasks := 2048,
shots_train := 1,
shots_test := 1,
meta_batch_size := 128,
mode="train",
train_test_split=False,
rng=self.rng
)
assert len(loader) == num_tasks / meta_batch_size
for batch in loader:
assert jnp.all(batch.train.task_id == batch.test.task_id)
# assert jnp.all(batch.train.x != batch.test.x)
# assert jnp.any(batch.train.y != batch.test.y)
assert len(batch.train.x) == meta_batch_size
def test_create_imitation_metaloader(self):
trainloader, testloader, validloader, oodloader, _ = create_imitation_metaloader(
name := "compositional_grid",
meta_batch_size := 128,
shots_train := 2,
shots_test := 2,
train_test_split := False,
num_tasks_train := 4096,
num_tasks_test := 1024,
num_tasks_valid := 1024,
num_tasks_ood := 1024,
seed := 2022,
grid_size=7,
num_interactions=3,
num_mazes=2,
num_objects=5,
num_distractors=2,
frac_ood=0.2,
task_support="random",
)
assert trainloader.sample_input.shape == (1, 7, 7, 5 + 2)
goals_ood = []
for batch in oodloader:
goals_ood.append(jnp.unique(batch.test.task_id[:, 0], axis=0))
goals_ood = jnp.concatenate(goals_ood)
goals_train = []
for batch in trainloader:
goals_train.append(jnp.unique(batch.test.task_id[:, 0], axis=0))
goals_train = jnp.unique(jnp.concatenate(goals_train), axis=0)
assert len(goals_train) + len(goals_ood) == 3 * 2 * 5 * 4
# Check that ood tasks are disjoint from train tasks
for g_ood in goals_ood:
assert not jnp.any(g_ood == goals_train)
for batch_test, batch_valid in zip(testloader, validloader):
| """
Copyright (c) Simon Schug
All rights reserved.
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
class ImitationTestCase(unittest.TestCase):
rng = jax.random.PRNGKey(0)
def test_ImitationMetaDataloader(self):
env = CompositionalGrid(
grid_size := 7,
num_interactions := 3,
num_mazes := 2,
num_objects := 5,
num_distractors := 2,
frac_ood := 0.2,
task_support := "random",
seed := 2022,
)
loader = ImitationMetaDataloader(
env,
num_tasks := 2048,
shots_train := 1,
shots_test := 1,
meta_batch_size := 128,
mode="train",
train_test_split=False,
rng=self.rng
)
assert len(loader) == num_tasks / meta_batch_size
for batch in loader:
assert jnp.all(batch.train.task_id == batch.test.task_id)
# assert jnp.all(batch.train.x != batch.test.x)
# assert jnp.any(batch.train.y != batch.test.y)
assert len(batch.train.x) == meta_batch_size
def test_create_imitation_metaloader(self):
trainloader, testloader, validloader, oodloader, _ = create_imitation_metaloader(
name := "compositional_grid",
meta_batch_size := 128,
shots_train := 2,
shots_test := 2,
train_test_split := False,
num_tasks_train := 4096,
num_tasks_test := 1024,
num_tasks_valid := 1024,
num_tasks_ood := 1024,
seed := 2022,
grid_size=7,
num_interactions=3,
num_mazes=2,
num_objects=5,
num_distractors=2,
frac_ood=0.2,
task_support="random",
)
assert trainloader.sample_input.shape == (1, 7, 7, 5 + 2)
goals_ood = []
for batch in oodloader:
goals_ood.append(jnp.unique(batch.test.task_id[:, 0], axis=0))
goals_ood = jnp.concatenate(goals_ood)
goals_train = []
for batch in trainloader:
goals_train.append(jnp.unique(batch.test.task_id[:, 0], axis=0))
goals_train = jnp.unique(jnp.concatenate(goals_train), axis=0)
assert len(goals_train) + len(goals_ood) == 3 * 2 * 5 * 4
# Check that ood tasks are disjoint from train tasks
for g_ood in goals_ood:
assert not jnp.any(g_ood == goals_train)
for batch_test, batch_valid in zip(testloader, validloader): | assert tree_length(batch_test) == num_tasks_test | 3 | 2023-12-22 16:35:49+00:00 | 12k |
willfinnigan/RetroBioCat_2 | rbc2/mcts/mcts.py | [
{
"identifier": "MultiExpander",
"path": "rbc2/expansion/multi_expander.py",
"snippet": "class MultiExpander:\n\n def __init__(self,\n expanders: dict[str: Expander],\n network: Optional[Network] = None):\n\n if len(expanders) == 0:\n raise ValueError... | import time
from typing import Optional, List
from rbc2.expansion.multi_expander import MultiExpander
from rbc2.reaction_evaluation.feasability_filters import Filter, default_filter_repo
from rbc2.reaction_evaluation.starting_material_evaluator.starting_material_evaluator import \
DefaultSQLStartingMaterialEvaluator
from rbc2.utils.add_logger import add_logger
from rbc2.configs.logging_config import logging_config
from rbc2.configs.mcts_config import MCTS_Config
from rbc2.expansion.expander_repository import get_expanders
from rbc2.expansion.default_expander_interface import Expander
from rbc2.mcts.mcts_loop.backpropogate import backpropogate
from rbc2.mcts.mcts_loop.expansion.expand import Expansion
from rbc2.mcts.mcts_loop.rollout import rollout
from rbc2.mcts.mcts_loop.score_node import score_node
from rbc2.mcts.mcts_loop.selection import Selection
from rbc2.mcts.tree_node import create_root, MCTS_Node
from rbc2.reaction_network_entities.network import Network
from rbc2.reaction_network_entities.pathway import Pathway | 8,073 |
class MCTS():
def __init__(self,
target_smi: str,
expanders: dict[str: Expander],
filters: dict[str: Filter] = default_filter_repo,
starting_material_evaluator: Optional[DefaultSQLStartingMaterialEvaluator] = None,
network: Optional[Network] = None,
mcts_config: Optional[MCTS_Config] = None):
self.target_smi = target_smi
self.logger = add_logger('MCTS', level=logging_config.mcts)
# config
self.mcts_config = mcts_config
if self.mcts_config is None:
self.mcts_config = MCTS_Config()
# starting material evaluator
self.starting_material_evaluator = starting_material_evaluator
if self.starting_material_evaluator is None:
self.starting_material_evaluator = DefaultSQLStartingMaterialEvaluator()
# network - used to save expansions so they are only done once
self.network = network
if self.network is None:
self.network = Network()
# multi_expander made up of the individual expanders
self.multi_expander = MultiExpander(expanders, network=self.network)
# filters
self.filters = filters
# mcts steps
self.selection = Selection()
self.expansion = Expansion(self.multi_expander,
self.starting_material_evaluator,
self.mcts_config)
self.root: MCTS_Node = create_root(target_smi) # root node
self.solved = [] # the solved nodes, updated during the backpropagation step
self.search_complete = False # used to stop the search either on max iterations or max run time
# stats
self.iterations = 0
self.run_time = 0
self.positive_backpropagations = 0
def run(self, callback=None):
"""Runs the MCTS search"""
self.logger.debug(f'Running MCTS search for {self.target_smi}. Max time: {self.mcts_config.max_search_time} seconds. Max iterations: {self.mcts_config.max_iterations}')
t0 = time.time()
while self.search_complete is False:
self.do_a_loop()
self._check_run_time(t0)
if callback is not None and self.iterations % self.mcts_config.callback_iterations == 0:
callback(self)
def do_a_loop(self):
self.logger.debug(f'---- ITERATION {self.iterations} ----')
node = self.selection.select(self.root, self.mcts_config.exploration)
new_node = rollout(node, self.expansion, self.selection, self.network, self.filters, self.mcts_config)
if new_node is None:
self.logger.debug(f'Search complete - fully explored')
self.search_complete = True
score = score_node(new_node, self.mcts_config, self.starting_material_evaluator)
if score >= 0.95: self.positive_backpropagations += 1
self.solved += backpropogate(new_node, score)
self.iterations += 1
def _get_nodes(self, node: MCTS_Node) -> List[MCTS_Node]:
"""Returns all the nodes which are decendents of the given node"""
nodes = []
evaluated_children = [child for child in node.children if child.is_evaluated()]
nodes += evaluated_children
for child in evaluated_children:
nodes += self._get_nodes(child)
return nodes
def get_all_nodes(self) -> List[MCTS_Node]:
nodes = [self.root]
nodes += self._get_nodes(self.root)
return nodes
def get_solved_nodes(self) -> List[MCTS_Node]:
return self.solved
|
class MCTS():
def __init__(self,
target_smi: str,
expanders: dict[str: Expander],
filters: dict[str: Filter] = default_filter_repo,
starting_material_evaluator: Optional[DefaultSQLStartingMaterialEvaluator] = None,
network: Optional[Network] = None,
mcts_config: Optional[MCTS_Config] = None):
self.target_smi = target_smi
self.logger = add_logger('MCTS', level=logging_config.mcts)
# config
self.mcts_config = mcts_config
if self.mcts_config is None:
self.mcts_config = MCTS_Config()
# starting material evaluator
self.starting_material_evaluator = starting_material_evaluator
if self.starting_material_evaluator is None:
self.starting_material_evaluator = DefaultSQLStartingMaterialEvaluator()
# network - used to save expansions so they are only done once
self.network = network
if self.network is None:
self.network = Network()
# multi_expander made up of the individual expanders
self.multi_expander = MultiExpander(expanders, network=self.network)
# filters
self.filters = filters
# mcts steps
self.selection = Selection()
self.expansion = Expansion(self.multi_expander,
self.starting_material_evaluator,
self.mcts_config)
self.root: MCTS_Node = create_root(target_smi) # root node
self.solved = [] # the solved nodes, updated during the backpropagation step
self.search_complete = False # used to stop the search either on max iterations or max run time
# stats
self.iterations = 0
self.run_time = 0
self.positive_backpropagations = 0
def run(self, callback=None):
"""Runs the MCTS search"""
self.logger.debug(f'Running MCTS search for {self.target_smi}. Max time: {self.mcts_config.max_search_time} seconds. Max iterations: {self.mcts_config.max_iterations}')
t0 = time.time()
while self.search_complete is False:
self.do_a_loop()
self._check_run_time(t0)
if callback is not None and self.iterations % self.mcts_config.callback_iterations == 0:
callback(self)
def do_a_loop(self):
self.logger.debug(f'---- ITERATION {self.iterations} ----')
node = self.selection.select(self.root, self.mcts_config.exploration)
new_node = rollout(node, self.expansion, self.selection, self.network, self.filters, self.mcts_config)
if new_node is None:
self.logger.debug(f'Search complete - fully explored')
self.search_complete = True
score = score_node(new_node, self.mcts_config, self.starting_material_evaluator)
if score >= 0.95: self.positive_backpropagations += 1
self.solved += backpropogate(new_node, score)
self.iterations += 1
def _get_nodes(self, node: MCTS_Node) -> List[MCTS_Node]:
"""Returns all the nodes which are decendents of the given node"""
nodes = []
evaluated_children = [child for child in node.children if child.is_evaluated()]
nodes += evaluated_children
for child in evaluated_children:
nodes += self._get_nodes(child)
return nodes
def get_all_nodes(self) -> List[MCTS_Node]:
nodes = [self.root]
nodes += self._get_nodes(self.root)
return nodes
def get_solved_nodes(self) -> List[MCTS_Node]:
return self.solved
| def get_solved_pathways(self) -> List[Pathway]: | 16 | 2023-12-30 11:33:41+00:00 | 12k |
DerwenAI/textgraphs | textgraphs/kg.py | [
{
"identifier": "DBPEDIA_MIN_ALIAS",
"path": "textgraphs/defaults.py",
"snippet": "DBPEDIA_MIN_ALIAS: float = 0.8"
},
{
"identifier": "DBPEDIA_MIN_SIM",
"path": "textgraphs/defaults.py",
"snippet": "DBPEDIA_MIN_SIM: float = 0.9"
},
{
"identifier": "DBPEDIA_SEARCH_API",
"path"... | from collections import OrderedDict
from difflib import SequenceMatcher
from bs4 import BeautifulSoup # pylint: disable=E0401
from icecream import ic # pylint: disable=E0401
from qwikidata.linked_data_interface import get_entity_dict_from_api # pylint: disable=E0401
from .defaults import DBPEDIA_MIN_ALIAS, DBPEDIA_MIN_SIM, \
DBPEDIA_SEARCH_API, DBPEDIA_SPARQL_API, DBPEDIA_SPOTLIGHT_API, \
WIKIDATA_API
from .elem import Edge, KGSearchHit, LinkedEntity, Node, NodeEnum, RelEnum
from .graph import SimpleGraph
from .pipe import KnowledgeGraph, Pipeline, PipelineFactory
import http
import json
import time
import traceback
import typing
import urllib.parse
import markdown2 # pylint: disable=E0401
import requests # type: ignore # pylint: disable=E0401
import spacy # pylint: disable=E0401 | 7,931 |
lang:
language identifier
debug:
debugging flag
"""
hit: dict = {}
params: dict = {
"action": "wbsearchentities",
"type": search_type,
"language": lang,
"format": "json",
"continue": "0",
"search": query,
}
response: requests.models.Response = requests.get(
self.wikidata_api,
params = params,
verify = False,
headers = {
"Accept": "application/json",
},
)
if debug:
ic(response.status_code)
# check for API success
if http.HTTPStatus.OK == response.status_code:
dat: dict = response.json()
hit = dat["search"][0]
#print(json.dumps(hit, indent = 2, sort_keys = True))
return hit
@classmethod
def _match_aliases (
cls,
query: str,
label: str,
aliases: typing.List[ str ],
*,
debug: bool = False,
) -> typing.Tuple[ float, str ]:
"""
Find the best-matching aliases for a search term.
query:
query string
label:
entity label to be matched against the available aliases
aliases:
list of the available aliases
debug:
debugging flag
"""
# best case scenario: the label is an exact match
if query == label.lower():
return ( 1.0, label, )
# ...therefore the label is not an exact match
prob_list: typing.List[ typing.Tuple[ float, str ]] = [
( SequenceMatcher(None, query, label.lower()).ratio(), label, )
]
# fallback: test the aliases
for alias in aliases:
prob: float = SequenceMatcher(None, query, alias.lower()).ratio()
if prob == 1.0:
# early termination for success
return ( prob, alias, )
prob_list.append(( prob, alias, ))
# find the closest match
prob_list.sort(reverse = True)
if debug:
ic(prob_list)
return prob_list[0]
def _md_to_text (
self,
md_text: str,
) -> str:
"""
Convert markdown to plain text.
<https://stackoverflow.com/questions/761824/python-how-to-convert-markdown-formatted-text-to-text>
md_text:
markdown text (unrendered)
returns:
rendered plain text as a string
"""
soup: BeautifulSoup = BeautifulSoup(
self.markdowner.convert(md_text),
features = "html.parser",
)
return soup.get_text().strip()
def wikidata_search (
self,
query: str,
*,
lang: str = "en",
debug: bool = False,
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=C0302
"""
This class provides a wrapper for access to a _knowledge graph_, which
then runs _entity linking_ and other functions in the pipeline.
This could provide an interface to a graph database, such as Neo4j,
StarDog, KùzuDB, etc., or to an API.
In this default case, we wrap services available via the WikiMedia APIs:
* DBPedia: Spotlight, SPARQL, Search
* Wikidata: Search
see copyright/license https://huggingface.co/spaces/DerwenAI/textgraphs/blob/main/README.md
"""
######################################################################
## class definitions
class KGWikiMedia (KnowledgeGraph): # pylint: disable=R0902,R0903
"""
Manage access to WikiMedia-related APIs.
"""
REL_ISA: str = "http://www.w3.org/1999/02/22-rdf-syntax-ns#type"
REL_SAME: str = "http://www.w3.org/2002/07/owl#sameAs"
NER_MAP: typing.Dict[ str, dict ] = OrderedDict({
"CARDINAL": {
"iri": "http://dbpedia.org/resource/Cardinal_number",
"definition": "Numerals that do not fall under another type"
},
"DATE": {
"iri": "http://dbpedia.org/ontology/date",
"definition": "Absolute or relative dates or periods"
},
"EVENT": {
"iri": "http://dbpedia.org/ontology/Event",
"definition": "Named hurricanes, battles, wars, sports events, etc."
},
"FAC": {
"iri": "http://dbpedia.org/ontology/Infrastructure",
"definition": "Buildings, airports, highways, bridges, etc."
},
"GPE": {
"iri": "http://dbpedia.org/ontology/Country",
"definition": "Countries, cities, states"
},
"LANGUAGE": {
"iri": "http://dbpedia.org/ontology/Language",
"definition": "Any named language"
},
"LAW": {
"iri": "http://dbpedia.org/ontology/Law",
"definition": "Named documents made into laws "
},
"LOC": {
"iri": "http://dbpedia.org/ontology/Place",
"definition": "Non-GPE locations, mountain ranges, bodies of water"
},
"MONEY": {
"iri": "http://dbpedia.org/resource/Money",
"definition": "Monetary values, including unit"
},
"NORP": {
"iri": "http://dbpedia.org/ontology/nationality",
"definition": "Nationalities or religious or political groups"
},
"ORDINAL": {
"iri": "http://dbpedia.org/resource/Ordinal_number",
"definition": "Ordinal number, i.e., first, second, etc."
},
"ORG": {
"iri": "http://dbpedia.org/ontology/Organisation",
"definition": "Companies, agencies, institutions, etc."
},
"PERCENT": {
"iri": "http://dbpedia.org/resource/Percentage",
"definition": "Percentage"
},
"PERSON": {
"iri": "http://dbpedia.org/ontology/Person",
"definition": "People, including fictional"
},
"PRODUCT": {
"iri": "http://dbpedia.org/ontology/product",
"definition": "Vehicles, weapons, foods, etc. (Not services)"
},
"QUANTITY": {
"iri": "http://dbpedia.org/resource/Quantity",
"definition": "Measurements, as of weight or distance"
},
"TIME": {
"iri": "http://dbpedia.org/ontology/time",
"definition": "Times smaller than a day"
},
"WORK OF ART": {
"iri": "http://dbpedia.org/resource/Work_of_art",
"definition": "Titles of books, songs, etc."
},
})
NS_PREFIX: typing.Dict[ str, str ] = OrderedDict({
"dbc": "http://dbpedia.org/resource/Category:",
"dbt": "http://dbpedia.org/resource/Template:",
"dbr": "http://dbpedia.org/resource/",
"yago":"http://dbpedia.org/class/yago/",
"dbd": "http://dbpedia.org/datatype/",
"dbo": "http://dbpedia.org/ontology/",
"dbp": "http://dbpedia.org/property/",
"units": "http://dbpedia.org/units/",
"dbpedia-commons": "http://commons.dbpedia.org/resource/",
"dbpedia-wikicompany": "http://dbpedia.openlinksw.com/wikicompany/",
"dbpedia-wikidata": "http://wikidata.dbpedia.org/resource/",
"wd": "http://www.wikidata.org/",
"rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
"schema": "https://schema.org/",
"owl": "http://www.w3.org/2002/07/owl#",
})
def __init__ ( # pylint: disable=W0102
self,
*,
spotlight_api: str = DBPEDIA_SPOTLIGHT_API,
dbpedia_search_api: str = DBPEDIA_SEARCH_API,
dbpedia_sparql_api: str = DBPEDIA_SPARQL_API,
wikidata_api: str = WIKIDATA_API,
ner_map: dict = NER_MAP,
ns_prefix: dict = NS_PREFIX,
min_alias: float = DBPEDIA_MIN_ALIAS,
min_similarity: float = DBPEDIA_MIN_SIM,
) -> None:
"""
Constructor.
spotlight_api:
`DBPedia Spotlight` API or equivalent local service
dbpedia_search_api:
`DBPedia Search` API or equivalent local service
dbpedia_sparql_api:
`DBPedia SPARQL` API or equivalent local service
wikidata_api:
`Wikidata Search` API or equivalent local service
ner_map:
named entity map for standardizing IRIs
ns_prefix:
RDF namespace prefixes
min_alias:
minimum alias probability threshold for accepting linked entities
min_similarity:
minimum label similarity threshold for accepting linked entities
"""
self.spotlight_api: str = spotlight_api
self.dbpedia_search_api: str = dbpedia_search_api
self.dbpedia_sparql_api: str = dbpedia_sparql_api
self.wikidata_api: str = wikidata_api
self.ner_map: dict = ner_map
self.ns_prefix: dict = ns_prefix
self.min_alias: float = min_alias
self.min_similarity: float = min_similarity
self.ent_cache: dict = {}
self.iri_cache: dict = {}
self.markdowner = markdown2.Markdown()
def augment_pipe (
self,
factory: PipelineFactory,
) -> None:
"""
Encapsulate a `spaCy` call to `add_pipe()` configuration.
factory:
a `PipelineFactory` used to configure components
"""
factory.aux_pipe.add_pipe(
"dbpedia_spotlight",
config = {
"dbpedia_rest_endpoint": self.spotlight_api, # type: ignore
},
)
def remap_ner (
self,
label: typing.Optional[ str ],
) -> typing.Optional[ str ]:
"""
Remap the OntoTypes4 values from NER output to more general-purpose IRIs.
label:
input NER label, an `OntoTypes4` value
returns:
an IRI for the named entity
"""
if label is None:
return None
try:
iri: typing.Optional[ dict ] = self.ner_map.get(label)
if iri is not None:
return iri["iri"]
except TypeError as ex:
ic(ex)
print(f"unknown label: {label}")
return None
def normalize_prefix (
self,
iri: str,
*,
debug: bool = False,
) -> str:
"""
Normalize the given IRI to use the standard DBPedia namespace prefixes.
iri:
input IRI, in fully-qualified domain representation
debug:
debugging flag
returns:
the compact IRI representation, using an RDF namespace prefix
"""
iri_parse: urllib.parse.ParseResult = urllib.parse.urlparse(iri)
if debug:
ic(iri_parse)
for prefix, ns_fqdn in self.ns_prefix.items():
ns_parse: urllib.parse.ParseResult = urllib.parse.urlparse(ns_fqdn)
if debug:
ic(prefix, ns_parse.netloc, ns_parse.path)
if iri_parse.netloc == ns_parse.netloc and iri_parse.path.startswith(ns_parse.path):
slug: str = iri_parse.path.replace(ns_parse.path, "")
# return normalized IRI
return f"{prefix}:{slug}"
# normalization failed
return iri
def perform_entity_linking (
self,
graph: SimpleGraph,
pipe: Pipeline,
*,
debug: bool = False,
) -> None:
"""
Perform _entity linking_ based on `DBPedia Spotlight` and other services.
graph:
source graph
pipe:
configured pipeline for the current document
debug:
debugging flag
"""
# first pass: use "spotlight" API to markup text
iter_ents: typing.Iterator[ LinkedEntity ] = self._link_spotlight_entities(
pipe,
debug = debug
)
for link in iter_ents:
_ = self._make_link(
graph,
pipe,
link,
self.REL_ISA,
debug = debug,
)
_ = self._secondary_entity_linking(
graph,
pipe,
link,
debug = debug,
)
# second pass: use KG search on entities which weren't linked by Spotlight
iter_ents = self._link_kg_search_entities(
graph,
debug = debug,
)
for link in iter_ents:
_ = self._make_link(
graph,
pipe,
link,
self.REL_ISA,
debug = debug,
)
_ = self._secondary_entity_linking(
graph,
pipe,
link,
debug = debug,
)
def resolve_rel_iri (
self,
rel: str,
*,
lang: str = "en",
debug: bool = False,
) -> typing.Optional[ str ]:
"""
Resolve a `rel` string from a _relation extraction_ model which has
been trained on this _knowledge graph_, which defaults to using the
`WikiMedia` graphs.
rel:
relation label, generation these source from Wikidata for many RE projects
lang:
language identifier
debug:
debugging flag
returns:
a resolved IRI
"""
# first, check the cache
if rel in self.iri_cache:
return self.iri_cache.get(rel)
# otherwise construct a Wikidata API search
try:
hit: dict = self._wikidata_endpoint(
rel,
search_type = "property",
lang = lang,
debug = debug,
)
if debug:
ic(hit["label"], hit["id"])
# get the `claims` of the Wikidata property
prop_id: str = hit["id"]
prop_dict: dict = get_entity_dict_from_api(prop_id)
claims: dict = prop_dict["claims"]
if "P1628" in claims:
# use `equivalent property` if available
iri: str = claims["P1628"][0]["mainsnak"]["datavalue"]["value"]
elif "P2235" in claims:
# use `external superproperty` as a fallback
iri = claims["P2235"][0]["mainsnak"]["datavalue"]["value"]
else:
ic("no related claims", rel)
return None
if debug:
ic(iri)
# update the cache
self.iri_cache[rel] = iri
return iri
except requests.exceptions.ConnectionError as r_ex:
ic(r_ex)
return None
except Exception as ex: # pylint: disable=W0718
ic(ex)
traceback.print_exc()
return None
######################################################################
## private methods, customized per KG instance
def _wikidata_endpoint (
self,
query: str,
*,
search_type: str = "item",
lang: str = "en",
debug: bool = False,
) -> dict:
"""
Call a generic endpoint for Wikidata API.
Raises various untrapped exceptions, to be handled by caller.
query:
query string
search_type:
search type
lang:
language identifier
debug:
debugging flag
"""
hit: dict = {}
params: dict = {
"action": "wbsearchentities",
"type": search_type,
"language": lang,
"format": "json",
"continue": "0",
"search": query,
}
response: requests.models.Response = requests.get(
self.wikidata_api,
params = params,
verify = False,
headers = {
"Accept": "application/json",
},
)
if debug:
ic(response.status_code)
# check for API success
if http.HTTPStatus.OK == response.status_code:
dat: dict = response.json()
hit = dat["search"][0]
#print(json.dumps(hit, indent = 2, sort_keys = True))
return hit
@classmethod
def _match_aliases (
cls,
query: str,
label: str,
aliases: typing.List[ str ],
*,
debug: bool = False,
) -> typing.Tuple[ float, str ]:
"""
Find the best-matching aliases for a search term.
query:
query string
label:
entity label to be matched against the available aliases
aliases:
list of the available aliases
debug:
debugging flag
"""
# best case scenario: the label is an exact match
if query == label.lower():
return ( 1.0, label, )
# ...therefore the label is not an exact match
prob_list: typing.List[ typing.Tuple[ float, str ]] = [
( SequenceMatcher(None, query, label.lower()).ratio(), label, )
]
# fallback: test the aliases
for alias in aliases:
prob: float = SequenceMatcher(None, query, alias.lower()).ratio()
if prob == 1.0:
# early termination for success
return ( prob, alias, )
prob_list.append(( prob, alias, ))
# find the closest match
prob_list.sort(reverse = True)
if debug:
ic(prob_list)
return prob_list[0]
def _md_to_text (
self,
md_text: str,
) -> str:
"""
Convert markdown to plain text.
<https://stackoverflow.com/questions/761824/python-how-to-convert-markdown-formatted-text-to-text>
md_text:
markdown text (unrendered)
returns:
rendered plain text as a string
"""
soup: BeautifulSoup = BeautifulSoup(
self.markdowner.convert(md_text),
features = "html.parser",
)
return soup.get_text().strip()
def wikidata_search (
self,
query: str,
*,
lang: str = "en",
debug: bool = False, | ) -> typing.Optional[ KGSearchHit ]: | 7 | 2023-12-25 11:42:53+00:00 | 12k |
pkariz/grin-explorer | backend/api/tests.py | [
{
"identifier": "Blockchain",
"path": "backend/api/models.py",
"snippet": "class Blockchain(TimeStampedModel):\n id = models.BigAutoField(primary_key=True)\n # testnet, mainnet etc\n name = models.CharField(max_length=255, unique=True)\n # slug of the name, we use it in url\n slug = model... | from django.test import TestCase
from .models import (
Blockchain,
Block,
BlockHeader,
Input,
Output,
Kernel,
Reorg,
Node,
NodeGroup,
)
from .bootstrap import fetch_and_store_block
from unittest.mock import patch, Mock
from backend.api.bootstrap import NodeV2API
from backend.api.bootstrap import NodeV2API, load_blocks
from backend.api.bootstrap import NodeV2API
from backend.api.bootstrap import NodeV2API
import json | 7,867 | self._get_output(2, 'a', False),
]), # h101.2
self._get_fake_block(3, 'h101.2', ['g2', 'a'], [
self._get_output(3, 'f', False),
self._get_output(3, 'b', False),
self._get_output(3, 'c', False),
]), # h102.3
self._get_fake_block(4, 'h102.3', ['c'], [
self._get_output(4, 'h', False)
]), # h103.3
self._get_fake_block(3, 'h101.2', ['a'], [
self._get_output(3, 'b', False),
self._get_output(3, 'c', False),
]), # first reorg - h102.2
self._get_fake_block(4, 'h102.2', ['b'], [
self._get_output(4, 'd', False),
]), # 103.2
self._get_fake_block(5, 'h103.2', [], []), # 104.2
self._get_fake_block(6, 'h104.2', ['c'], [
self._get_output(6, 'e', False),
]), # 105.2
self._get_fake_block(2, 'h100', ['g1'], [
self._get_output(2, 'a', False),
]), # second reorg - h101
self._get_fake_block(3, 'h101', ['g3', 'a'], [
self._get_output(3, 'i', False),
self._get_output(3, 'b', False),
self._get_output(3, 'c', False),
]), # h102
self._get_fake_block(4, 'h102', ['b'], [
self._get_output(4, 'd', False),
]), # h103
]
# make sure node returns reorg data as defined in the function docs
node_instance_mock = Mock()
node_instance_mock.get_header.side_effect = headers
node_instance_mock.get_block.side_effect = blocks
self.nodeV2APIMock.return_value = node_instance_mock
# send new blocks to accepted-block view (includes 2 reorgs)
for i in range(0, len(headers)):
header = headers[i]
post_data = self._get_accepted_block_data(
header['height'], header['hash'], header['previous']
)
self.client.post(
f'/api/blockchains/{self.blockchain.slug}/accepted/',
json.dumps(post_data),
content_type="application/json"
)
# validate correctness of the main chain block sequence
main_chain_blocks = self.blockchain.blocks\
.filter(reorg__isnull=True)\
.order_by('height')
expected_main_chain = [
{ 'height': 1, 'hash': self.to_hex('h100'), 'prev_hash': None },
{ 'height': 2, 'hash': self.to_hex('h101'), 'prev_hash': self.to_hex('h100') },
{ 'height': 3, 'hash': self.to_hex('h102'), 'prev_hash': self.to_hex('h101') },
{ 'height': 4, 'hash': self.to_hex('h103'), 'prev_hash': self.to_hex('h102') },
]
actual_main_chain = [
{
'height': block.height,
'hash': block.hash,
'prev_hash': block.prev_hash,
}
for block in main_chain_blocks
]
self.assertEqual(actual_main_chain, expected_main_chain)
# reorgs validation
self.assertEqual(Reorg.objects.count(), 2)
# validate correctness of the first reorg
reorg1 = Reorg.objects.first()
self.assertEqual(reorg1.blockchain, self.blockchain)
self.assertEqual(reorg1.start_reorg_block.hash, self.to_hex('h102.3'))
self.assertEqual(reorg1.end_reorg_block.hash, self.to_hex('h103.3'))
self.assertEqual(reorg1.start_main_block.hash, self.to_hex('h102.2'))
# validate correctness of the second reorg
reorg2 = Reorg.objects.last()
self.assertEqual(reorg2.blockchain, self.blockchain)
self.assertEqual(reorg2.start_reorg_block.hash, self.to_hex('h101.2'))
self.assertEqual(reorg2.end_reorg_block.hash, self.to_hex('h105.2'))
self.assertEqual(reorg2.start_main_block.hash, self.to_hex('h101'))
# validate all inputs
main_inputs = set(map(
lambda input: (
input.commitment,
input.output.block.hash if input.output else None,
input.block.reorg.id if input.block.reorg else None
),
Input.objects.all()
))
expected_inputs = set([
# pairs (<commitment>, <block_hash_of_related_output>, <reorgID>)
# main
('g1', self.to_hex('h100'), None),
('g3', self.to_hex('h100'), None),
('a', self.to_hex('h101'), None),
('b', self.to_hex('h102'), None),
# reorg 2
('g1', self.to_hex('h100'), 2),
('a', self.to_hex('h101.2'), 2),
('b', self.to_hex('h102.2'), 2),
('c', self.to_hex('h102.2'), 2),
# reorg 1
('g2', self.to_hex('h100'), 1),
('a', self.to_hex('h101.2'), 1),
('c', self.to_hex('h102.3'), 1),
])
self.assertEqual(main_inputs, expected_inputs)
# validate all outputs
main_outputs = set(map(
lambda output: (
output.commitment,
output.block.hash,
output.spent,
tuple(sorted(map(
lambda input: input.block.hash,
output.inputs.all()
)))
),
|
class ReorgTestCase(TestCase):
def setUp(self):
self.patcher = patch('backend.api.bootstrap.NodeV2API')
self.nodeV2APIMock = self.patcher.start()
node_group = NodeGroup.objects.create(name='foo group')
node = Node.objects.create(
name='test',
api_url='foo_url',
api_username='foouser',
api_password='foopw',
archive=False,
group=node_group,
)
self.blockchain = Blockchain.objects.create(
name='test',
node=node,
default=True,
fetch_price=False,
)
def tearDown(self):
self.patcher.stop()
def to_hex(self, s):
# in some cases some previous hash might be None
if s is None:
return
return bytes(s, 'utf-8').hex()
def _get_fake_header(self, height, hash, prev_hash):
return {
'cuckoo_solution': list(range(1, 43)),
'edge_bits': 32,
'hash': self.to_hex(hash),
'height': height,
'kernel_mmr_size': 1,
'kernel_root': 'foo-kernel-root',
'nonce': 1,
'output_mmr_size': 1,
'output_root': 'foo-output-root',
'prev_root': 'foo-prev-root',
'previous': self.to_hex(prev_hash),
'range_proof_root': 'foo-range-proof-root',
'secondary_scaling': 0,
'timestamp': '2000-01-01T00:00:00+00:00',
'total_difficulty': 1,
'total_kernel_offset': 'foo-total-kernel-offset',
'version': 5,
}
def _get_fake_block(self, height, prev_hash, inputs, outputs):
return {
'header': {
'previous': self.to_hex(prev_hash),
},
'inputs': inputs,
'kernels': [
{
'excess': 'foo-excess',
'excess_sig': 'foo-excess-sig',
'features': 'Plain',
'fee': 30000000,
'fee_shift': 0,
'lock_height': 0,
},
],
'outputs': outputs,
}
def _get_accepted_block_data(self, height, hash, prev_hash):
# return only the data that's read in the view
if prev_hash:
# we convert prev_hash to bytearray (but as list of ints) because
# that's what's POST-ed to the accepted-block API.
prev_hash = [x for x in bytearray(bytes.fromhex(prev_hash))]
return {
'data': {
'body': {},
'header': {
'height': height,
'prev_hash': prev_hash,
},
},
# here hash is already hex
'hash': hash,
}
def _get_output(self, height, commit, spent):
return {
'block_height': height,
'commit': commit,
'merkle_proof': None,
'mmr_index': 0,
'output_type': 'Transaction',
'proof': 'foo-proof',
'proof_hash': 'foo-proof-hash',
'spent': spent,
}
def test_reorg_through_accepted_block_view(self):
"""
Test nested reorg scenario for accepted-block view.
0 = main chain
1 and 2 = Reorg 1 and Reorg 2
BLOCK ORDER:
100 --> 100(0)
101.2 --> 100(0), 101.2(0)
102.3 --> 100(0), 101.2(0), 102.3(0)
103.3 --> 100(0), 101.2(0), 102.3(0), 103.3(0)
102.2 --> 100(0), 101.2(0), 102.2(0):
FIND AND MARK OLD AS REORG 1: 102.3(1), 103.3(1)
103.2 --> 100(0), 101.2(0), 102.2(0), 103.2(0):
THE OLD ONES STAY THE SAME: 102.3(1), 103.3(1)
104.2 --> 100(0), 101.2(0), 102.2(0), 103.2(0), 104.2(0):
THE OLD ONES STAY THE SAME: 102.3(1), 103.3(1)
105.2 --> 100(0), 101.2(0), 102.2(0), 103.2(0), 104.2(0), 105.2(0):
THE OLD ONES STAY THE SAME: 102.3(1), 103.3(1)
101 --> 100(0), 101(0):
FIND AND MARK OLD AS REORG 2: 101.2(2), 102.2(2), 103.2(2), 104.2(2), 105.2(2)
PREVIOUS REORGS: 102.3(1), 103.3(1)
102 --> 100(0), 101(0), 102(0):
THE OLD ONES STAY THE SAME: 102.3(1), 103.3(1), 101.2(2), 102.2(2), 103.2(2), 104.2(2), 105.2(2)
103 --> 100(0), 101(0), 102(0), 103(0):
THE OLD ONES STAY THE SAME: 102.3(1), 103.3(1), 101.2(2), 102.2(2), 103.2(2), 104.2(2), 105.2(2)
"""
# define header/block sequence as defined in the function docstring
headers = [
self._get_fake_header(1, 'h100', None), # genesis
self._get_fake_header(2, 'h101.2', 'h100'),
self._get_fake_header(3, 'h102.3', 'h101.2'),
self._get_fake_header(4, 'h103.3', 'h102.3'),
self._get_fake_header(3, 'h102.2', 'h101.2'), # first reorg
self._get_fake_header(4, 'h103.2', 'h102.2'),
self._get_fake_header(5, 'h104.2', 'h103.2'),
self._get_fake_header(6, 'h105.2', 'h104.2'),
self._get_fake_header(2, 'h101', 'h100'), # second reorg
self._get_fake_header(3, 'h102', 'h101'),
self._get_fake_header(4, 'h103', 'h102'),
]
blocks = [
self._get_fake_block(1, None, [], [
self._get_output(1, 'g1', False),
self._get_output(1, 'g2', False),
self._get_output(1, 'g3', False)
]), # genesis - h100
self._get_fake_block(2, 'h100', ['g1'], [
self._get_output(2, 'a', False),
]), # h101.2
self._get_fake_block(3, 'h101.2', ['g2', 'a'], [
self._get_output(3, 'f', False),
self._get_output(3, 'b', False),
self._get_output(3, 'c', False),
]), # h102.3
self._get_fake_block(4, 'h102.3', ['c'], [
self._get_output(4, 'h', False)
]), # h103.3
self._get_fake_block(3, 'h101.2', ['a'], [
self._get_output(3, 'b', False),
self._get_output(3, 'c', False),
]), # first reorg - h102.2
self._get_fake_block(4, 'h102.2', ['b'], [
self._get_output(4, 'd', False),
]), # 103.2
self._get_fake_block(5, 'h103.2', [], []), # 104.2
self._get_fake_block(6, 'h104.2', ['c'], [
self._get_output(6, 'e', False),
]), # 105.2
self._get_fake_block(2, 'h100', ['g1'], [
self._get_output(2, 'a', False),
]), # second reorg - h101
self._get_fake_block(3, 'h101', ['g3', 'a'], [
self._get_output(3, 'i', False),
self._get_output(3, 'b', False),
self._get_output(3, 'c', False),
]), # h102
self._get_fake_block(4, 'h102', ['b'], [
self._get_output(4, 'd', False),
]), # h103
]
# make sure node returns reorg data as defined in the function docs
node_instance_mock = Mock()
node_instance_mock.get_header.side_effect = headers
node_instance_mock.get_block.side_effect = blocks
self.nodeV2APIMock.return_value = node_instance_mock
# send new blocks to accepted-block view (includes 2 reorgs)
for i in range(0, len(headers)):
header = headers[i]
post_data = self._get_accepted_block_data(
header['height'], header['hash'], header['previous']
)
self.client.post(
f'/api/blockchains/{self.blockchain.slug}/accepted/',
json.dumps(post_data),
content_type="application/json"
)
# validate correctness of the main chain block sequence
main_chain_blocks = self.blockchain.blocks\
.filter(reorg__isnull=True)\
.order_by('height')
expected_main_chain = [
{ 'height': 1, 'hash': self.to_hex('h100'), 'prev_hash': None },
{ 'height': 2, 'hash': self.to_hex('h101'), 'prev_hash': self.to_hex('h100') },
{ 'height': 3, 'hash': self.to_hex('h102'), 'prev_hash': self.to_hex('h101') },
{ 'height': 4, 'hash': self.to_hex('h103'), 'prev_hash': self.to_hex('h102') },
]
actual_main_chain = [
{
'height': block.height,
'hash': block.hash,
'prev_hash': block.prev_hash,
}
for block in main_chain_blocks
]
self.assertEqual(actual_main_chain, expected_main_chain)
# reorgs validation
self.assertEqual(Reorg.objects.count(), 2)
# validate correctness of the first reorg
reorg1 = Reorg.objects.first()
self.assertEqual(reorg1.blockchain, self.blockchain)
self.assertEqual(reorg1.start_reorg_block.hash, self.to_hex('h102.3'))
self.assertEqual(reorg1.end_reorg_block.hash, self.to_hex('h103.3'))
self.assertEqual(reorg1.start_main_block.hash, self.to_hex('h102.2'))
# validate correctness of the second reorg
reorg2 = Reorg.objects.last()
self.assertEqual(reorg2.blockchain, self.blockchain)
self.assertEqual(reorg2.start_reorg_block.hash, self.to_hex('h101.2'))
self.assertEqual(reorg2.end_reorg_block.hash, self.to_hex('h105.2'))
self.assertEqual(reorg2.start_main_block.hash, self.to_hex('h101'))
# validate all inputs
main_inputs = set(map(
lambda input: (
input.commitment,
input.output.block.hash if input.output else None,
input.block.reorg.id if input.block.reorg else None
),
Input.objects.all()
))
expected_inputs = set([
# pairs (<commitment>, <block_hash_of_related_output>, <reorgID>)
# main
('g1', self.to_hex('h100'), None),
('g3', self.to_hex('h100'), None),
('a', self.to_hex('h101'), None),
('b', self.to_hex('h102'), None),
# reorg 2
('g1', self.to_hex('h100'), 2),
('a', self.to_hex('h101.2'), 2),
('b', self.to_hex('h102.2'), 2),
('c', self.to_hex('h102.2'), 2),
# reorg 1
('g2', self.to_hex('h100'), 1),
('a', self.to_hex('h101.2'), 1),
('c', self.to_hex('h102.3'), 1),
])
self.assertEqual(main_inputs, expected_inputs)
# validate all outputs
main_outputs = set(map(
lambda output: (
output.commitment,
output.block.hash,
output.spent,
tuple(sorted(map(
lambda input: input.block.hash,
output.inputs.all()
)))
), | Output.objects.all() | 4 | 2023-12-24 22:15:11+00:00 | 12k |
datrocity/pond | pond/activity.py | [
{
"identifier": "Artifact",
"path": "pond/artifact/artifact.py",
"snippet": "class Artifact(ABC):\n \"\"\" Knows how to read and write one type of artifact.\n\n Concrete Artifact implementation should save the metadata with the data if possible,\n so that the artifact is self-contained even if,... | from typing import Any, Dict, Optional, Set, Type, Union
from pond.artifact import Artifact
from pond.artifact.artifact_registry import ArtifactRegistry, global_artifact_registry
from pond.conventions import DataType, WriteMode
from pond.metadata.metadata_source import MetadataSource
from pond.metadata.dict import DictMetadataSource
from pond.metadata.manifest import Manifest
from pond.storage.datastore import Datastore
from pond.version import Version
from pond.version_name import SimpleVersionName, VersionName
from pond.versioned_artifact import VersionedArtifact | 9,823 | create folder-like groups inside a datastore. This can be, for instance, the name of
a project or experiment.
datastore: Datastore
Data store object, representing the storage where the artifacts are read/written.
author: str
Author name/identifier, used as metadata. Default is 'NA'.
version_name_class: VersionName
Class used to create increasing version names. The default value,
`SimpleVersionName` creates version names as `v1`, `v2`, etc.
artifact_registry: ArtifactRegistry
Registry object mapping data types and file formats to an artifact class able to
read/write them. The artifact classes distributed with `pond` register automatically
to the default value, `global_artifact_registry`.
"""
self.source = source
self.location = location
self.datastore = datastore
self.author = author
self.version_name_class = version_name_class
self.artifact_registry = artifact_registry
# History of all read versions, will be used as default
# "inputs" for written tables. Feel free to empty it whenever needed.
self.read_history: Set[str] = set()
# Dict[TableRef, List[Version]]: History of all written versions
self.write_history: Set[str] = set()
def read_version(self,
name: str,
version_name: Optional[Union[str, VersionName]] = None) -> Version:
""" Read a version, given its name and version name.
If no version name is specified, the latest version is read.
Parameters
----------
name: str
Artifact name
version_name: str or VersionName
Version name, given as a string (more common) or as VersionName instance. If None,
the latest version name for the given artifact is used.
Return
------
version: Version
The loaded Version object.
See Also
--------
`read_artifact` -- Read an Artifact object, including artifact data and metadata
`read` -- Read the data in an artifact
"""
versioned_artifact = VersionedArtifact.from_datastore(
artifact_name=name,
location=self.location,
datastore=self.datastore,
)
version = versioned_artifact.read(version_name=version_name)
version_id = version.get_uri(self.location, self.datastore)
self.read_history.add(version_id)
return version
def read_artifact(self,
name: str,
version_name: Optional[Union[str, VersionName]] = None) -> Any:
""" Read an artifact given its name and version name.
If no version name is specified, the latest version is read.
Parameters
----------
name: str
Artifact name
version_name: str or VersionName
Version name, given as a string (more common) or as VersionName instance. If None,
the latest version name for the given artifact is used.
Return
------
artifact: Artifact
The loaded artifact
See Also
--------
`read` -- Read the data in an artifact
`read_version` -- Read a Version object, including the artifact object and version manifest
"""
version = self.read_version(name, version_name)
return version.artifact
def read(self,
name: str,
version_name: Optional[Union[str, VersionName]] = None) -> Any:
""" Read some data given its name and version name.
If no version name is specified, the latest version is read.
Parameters
----------
name: str
Artifact name
version_name: str or VersionName
Version name, given as a string (more common) or as VersionName instance. If None,
the latest version name for the given artifact is used.
Return
------
data: Any
The loaded data. The metadata is discarded.
See Also
--------
`read_artifact` -- Read an Artifact object, including artifact data and metadata
`read_version` -- Read a Version object, including the artifact object and version manifest
"""
artifact = self.read_artifact(name, version_name)
return artifact.data
# TODO version name is a string vs is a VersionName instance
def write(self,
|
class Activity:
# TODO: can location have subpaths? e.g. `experiment1/test22`
def __init__(self,
source: str,
location: str,
datastore: Datastore,
author: str='NA',
version_name_class: Type[VersionName] = SimpleVersionName,
artifact_registry: ArtifactRegistry = global_artifact_registry):
""" Read and write artifacts with lineage and metadata.
Activity is the main user-facing interface for pond. Most of the usages of `pond` only
ever interact with an instance of this object.
Parameters
----------
source: str
String defining where the read/write operations are made. Often, this is the path of
a file or notebook, used for lineage tracing.
location: str
Root location in the data store where artifacts are read/written. This is used to
create folder-like groups inside a datastore. This can be, for instance, the name of
a project or experiment.
datastore: Datastore
Data store object, representing the storage where the artifacts are read/written.
author: str
Author name/identifier, used as metadata. Default is 'NA'.
version_name_class: VersionName
Class used to create increasing version names. The default value,
`SimpleVersionName` creates version names as `v1`, `v2`, etc.
artifact_registry: ArtifactRegistry
Registry object mapping data types and file formats to an artifact class able to
read/write them. The artifact classes distributed with `pond` register automatically
to the default value, `global_artifact_registry`.
"""
self.source = source
self.location = location
self.datastore = datastore
self.author = author
self.version_name_class = version_name_class
self.artifact_registry = artifact_registry
# History of all read versions, will be used as default
# "inputs" for written tables. Feel free to empty it whenever needed.
self.read_history: Set[str] = set()
# Dict[TableRef, List[Version]]: History of all written versions
self.write_history: Set[str] = set()
def read_version(self,
name: str,
version_name: Optional[Union[str, VersionName]] = None) -> Version:
""" Read a version, given its name and version name.
If no version name is specified, the latest version is read.
Parameters
----------
name: str
Artifact name
version_name: str or VersionName
Version name, given as a string (more common) or as VersionName instance. If None,
the latest version name for the given artifact is used.
Return
------
version: Version
The loaded Version object.
See Also
--------
`read_artifact` -- Read an Artifact object, including artifact data and metadata
`read` -- Read the data in an artifact
"""
versioned_artifact = VersionedArtifact.from_datastore(
artifact_name=name,
location=self.location,
datastore=self.datastore,
)
version = versioned_artifact.read(version_name=version_name)
version_id = version.get_uri(self.location, self.datastore)
self.read_history.add(version_id)
return version
def read_artifact(self,
name: str,
version_name: Optional[Union[str, VersionName]] = None) -> Any:
""" Read an artifact given its name and version name.
If no version name is specified, the latest version is read.
Parameters
----------
name: str
Artifact name
version_name: str or VersionName
Version name, given as a string (more common) or as VersionName instance. If None,
the latest version name for the given artifact is used.
Return
------
artifact: Artifact
The loaded artifact
See Also
--------
`read` -- Read the data in an artifact
`read_version` -- Read a Version object, including the artifact object and version manifest
"""
version = self.read_version(name, version_name)
return version.artifact
def read(self,
name: str,
version_name: Optional[Union[str, VersionName]] = None) -> Any:
""" Read some data given its name and version name.
If no version name is specified, the latest version is read.
Parameters
----------
name: str
Artifact name
version_name: str or VersionName
Version name, given as a string (more common) or as VersionName instance. If None,
the latest version name for the given artifact is used.
Return
------
data: Any
The loaded data. The metadata is discarded.
See Also
--------
`read_artifact` -- Read an Artifact object, including artifact data and metadata
`read_version` -- Read a Version object, including the artifact object and version manifest
"""
artifact = self.read_artifact(name, version_name)
return artifact.data
# TODO version name is a string vs is a VersionName instance
def write(self, | data: DataType, | 2 | 2023-12-24 13:05:58+00:00 | 12k |
demirogun/pyethnobiology | pyethnobiology/pyethnobiology.py | [
{
"identifier": "UR",
"path": "pyethnobiology/indices.py",
"snippet": "class UR:\n\n def __init__(self, data, informant_column=\"informant\", taxon_column=\"taxon\", use_column=\"ailments_treated\"):\n \"\"\"\n Initializes the class with necessary data and column names.\n\n Args:... | import pandas as pd
import rdata
from .indices import UR, CI, FC, NU, RFC, RI, UV, CV, FL, FIC
from .stats import Jaccard
from .visualization import ChordPlot | 9,529 |
class pyethnobiology:
"""
Encapsulates ethnobotanical data and analysis.
"""
def __init__(
self,
data: pd.DataFrame,
informant_column: str = "informant",
taxon_column: str = "taxon",
use_column: str = "ailments_treated",
literature_column: str = "literature",
convert_use_data: bool = False,
) -> None:
"""
Initializes the Ethnobotany class.
Args:
data: DataFrame containing ethnobotanical information.
informant_column: Name of the column containing informant IDs.
taxon_column: Name of the column containing species names.
use_column: Name of the column containing plant uses.
convert_use_data: Whether to convert use data format (optional).
"""
self.data = self.load_data(data, informant_column, taxon_column, use_column, convert_use_data)
self.informant_column = informant_column
self.taxon_column = taxon_column
self.use_column = use_column
self.literature_column = literature_column
def CI(self):
CI_class = CI(self.data, self.informant_column, self.taxon_column, self.use_column)
return CI_class
def FC(self):
FC_class = FC(self.data, self.informant_column, self.taxon_column, self.use_column)
return FC_class
|
class pyethnobiology:
"""
Encapsulates ethnobotanical data and analysis.
"""
def __init__(
self,
data: pd.DataFrame,
informant_column: str = "informant",
taxon_column: str = "taxon",
use_column: str = "ailments_treated",
literature_column: str = "literature",
convert_use_data: bool = False,
) -> None:
"""
Initializes the Ethnobotany class.
Args:
data: DataFrame containing ethnobotanical information.
informant_column: Name of the column containing informant IDs.
taxon_column: Name of the column containing species names.
use_column: Name of the column containing plant uses.
convert_use_data: Whether to convert use data format (optional).
"""
self.data = self.load_data(data, informant_column, taxon_column, use_column, convert_use_data)
self.informant_column = informant_column
self.taxon_column = taxon_column
self.use_column = use_column
self.literature_column = literature_column
def CI(self):
CI_class = CI(self.data, self.informant_column, self.taxon_column, self.use_column)
return CI_class
def FC(self):
FC_class = FC(self.data, self.informant_column, self.taxon_column, self.use_column)
return FC_class
| def NU(self): | 3 | 2023-12-25 01:06:51+00:00 | 12k |
JiePKU/MoLE | train_db.py | [
{
"identifier": "ConfigSanitizer",
"path": "library/config_util.py",
"snippet": "class ConfigSanitizer:\n # @curry\n @staticmethod\n def __validate_and_convert_twodim(klass, value: Sequence) -> Tuple:\n Schema(ExactSequence([klass, klass]))(value)\n return tuple(value)\n\n # @curry\n @staticm... | import gc
import time
import argparse
import itertools
import math
import os
import toml
import torch
import diffusers
import library.train_util as train_util
import library.config_util as config_util
import library.custom_train_functions as custom_train_functions
from multiprocessing import Value
from tqdm import tqdm
from accelerate.utils import set_seed
from diffusers import DDPMScheduler
from library.config_util import (
ConfigSanitizer,
BlueprintGenerator,
)
from library.custom_train_functions import (
apply_snr_weight,
get_weighted_text_embeddings,
prepare_scheduler_for_custom_training,
pyramid_noise_like,
apply_noise_offset,
scale_v_prediction_loss_like_noise_prediction,
) | 7,307 | train_dataloader = torch.utils.data.DataLoader(
train_dataset_group,
batch_size=1,
shuffle=True,
collate_fn=collater,
num_workers=n_workers,
persistent_workers=args.persistent_data_loader_workers,
)
# 学習ステップ数を計算する
if args.max_train_epochs is not None:
args.max_train_steps = args.max_train_epochs * math.ceil(
len(train_dataloader) / accelerator.num_processes / args.gradient_accumulation_steps
)
print(f"override steps. steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}")
# データセット側にも学習ステップを送信
train_dataset_group.set_max_train_steps(args.max_train_steps)
if args.stop_text_encoder_training is None:
args.stop_text_encoder_training = args.max_train_steps + 1 # do not stop until end
# lr schedulerを用意する TODO gradient_accumulation_stepsの扱いが何かおかしいかもしれない。後で確認する
lr_scheduler = train_util.get_scheduler_fix(args, optimizer, accelerator.num_processes)
# 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする
if args.full_fp16:
assert (
args.mixed_precision == "fp16"
), "full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。"
print("enable full fp16 training.")
unet.to(weight_dtype)
text_encoder.to(weight_dtype)
# acceleratorがなんかよろしくやってくれるらしい
if train_text_encoder:
unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
unet, text_encoder, optimizer, train_dataloader, lr_scheduler
)
else:
unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)
# transform DDP after prepare
text_encoder, unet = train_util.transform_if_model_is_DDP(text_encoder, unet)
if not train_text_encoder:
text_encoder.to(accelerator.device, dtype=weight_dtype) # to avoid 'cpu' vs 'cuda' error
# 実験的機能:勾配も含めたfp16学習を行う PyTorchにパッチを当ててfp16でのgrad scaleを有効にする
if args.full_fp16:
train_util.patch_accelerator_for_fp16_training(accelerator)
# resumeする
train_util.resume_from_local_or_hf_if_specified(accelerator, args)
# epoch数を計算する
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
if (args.save_n_epoch_ratio is not None) and (args.save_n_epoch_ratio > 0):
args.save_every_n_epochs = math.floor(num_train_epochs / args.save_n_epoch_ratio) or 1
# 学習する
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
print("running training / 学習開始")
print(f" num train images * repeats / 学習画像の数×繰り返し回数: {train_dataset_group.num_train_images}")
print(f" num reg images / 正則化画像の数: {train_dataset_group.num_reg_images}")
print(f" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}")
print(f" num epochs / epoch数: {num_train_epochs}")
print(f" batch size per device / バッチサイズ: {args.train_batch_size}")
print(f" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}")
print(f" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}")
print(f" total optimization steps / 学習ステップ数: {args.max_train_steps}")
progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc="steps")
global_step = 0
noise_scheduler = DDPMScheduler(
beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000, clip_sample=False
)
prepare_scheduler_for_custom_training(noise_scheduler, accelerator.device)
if accelerator.is_main_process:
accelerator.init_trackers("dreambooth" if args.log_tracker_name is None else args.log_tracker_name)
loss_list = []
loss_total = 0.0
for epoch in range(num_train_epochs):
print(f"\nepoch {epoch+1}/{num_train_epochs}")
current_epoch.value = epoch + 1
# 指定したステップ数までText Encoderを学習する:epoch最初の状態
unet.train()
# train==True is required to enable gradient_checkpointing
if args.gradient_checkpointing or global_step < args.stop_text_encoder_training:
text_encoder.train()
for step, batch in enumerate(train_dataloader):
current_step.value = global_step
# 指定したステップ数でText Encoderの学習を止める
if global_step == args.stop_text_encoder_training:
print(f"stop text encoder training at step {global_step}")
if not args.gradient_checkpointing:
text_encoder.train(False)
text_encoder.requires_grad_(False)
with accelerator.accumulate(unet):
with torch.no_grad():
# latentに変換
if cache_latents:
latents = batch["latents"].to(accelerator.device)
else:
latents = vae.encode(batch["images"].to(dtype=weight_dtype)).latent_dist.sample()
latents = latents * 0.18215
b_size = latents.shape[0]
# Sample noise that we'll add to the latents
noise = torch.randn_like(latents, device=latents.device)
if args.noise_offset:
noise = apply_noise_offset(latents, noise, args.noise_offset, args.adaptive_noise_scale)
elif args.multires_noise_iterations:
| # DreamBooth training
# XXX dropped option: fine_tune
# perlin_noise,
def train(args):
train_util.verify_training_args(args)
train_util.prepare_dataset_args(args, False)
cache_latents = args.cache_latents
if args.seed is not None:
set_seed(args.seed) # 乱数系列を初期化する
tokenizer = train_util.load_tokenizer(args)
# データセットを準備する
if args.dataset_class is None:
blueprint_generator = BlueprintGenerator(ConfigSanitizer(True, False, True))
if args.dataset_config is not None:
print(f"Load dataset config from {args.dataset_config}")
user_config = config_util.load_user_config(args.dataset_config)
ignored = ["train_data_dir", "reg_data_dir"]
if any(getattr(args, attr) is not None for attr in ignored):
print(
"ignore following options because config file is found: {0} / 設定ファイルが利用されるため以下のオプションは無視されます: {0}".format(
", ".join(ignored)
)
)
else:
user_config = {
"datasets": [
{"subsets": config_util.generate_dreambooth_subsets_config_by_subdirs(args.train_data_dir, args.reg_data_dir)}
]
}
blueprint = blueprint_generator.generate(user_config, args, tokenizer=tokenizer)
train_dataset_group = config_util.generate_dataset_group_by_blueprint(blueprint.dataset_group)
else:
train_dataset_group = train_util.load_arbitrary_dataset(args, tokenizer)
current_epoch = Value("i", 0)
current_step = Value("i", 0)
ds_for_collater = train_dataset_group if args.max_data_loader_n_workers == 0 else None
collater = train_util.collater_class(current_epoch, current_step, ds_for_collater)
if args.no_token_padding:
train_dataset_group.disable_token_padding()
if args.debug_dataset:
train_util.debug_dataset(train_dataset_group)
return
if cache_latents:
assert (
train_dataset_group.is_latent_cacheable()
), "when caching latents, either color_aug or random_crop cannot be used / latentをキャッシュするときはcolor_augとrandom_cropは使えません"
# acceleratorを準備する
print("prepare accelerator")
if args.gradient_accumulation_steps > 1:
print(
f"gradient_accumulation_steps is {args.gradient_accumulation_steps}. accelerate does not support gradient_accumulation_steps when training multiple models (U-Net and Text Encoder), so something might be wrong"
)
print(
f"gradient_accumulation_stepsが{args.gradient_accumulation_steps}に設定されています。accelerateは複数モデル(U-NetおよびText Encoder)の学習時にgradient_accumulation_stepsをサポートしていないため結果は未知数です"
)
accelerator, unwrap_model = train_util.prepare_accelerator(args)
# mixed precisionに対応した型を用意しておき適宜castする
weight_dtype, save_dtype = train_util.prepare_dtype(args)
# モデルを読み込む
text_encoder, vae, unet, load_stable_diffusion_format = train_util.load_target_model(args, weight_dtype, accelerator)
# verify load/save model formats
if load_stable_diffusion_format:
src_stable_diffusion_ckpt = args.pretrained_model_name_or_path
src_diffusers_model_path = None
else:
src_stable_diffusion_ckpt = None
src_diffusers_model_path = args.pretrained_model_name_or_path
if args.save_model_as is None:
save_stable_diffusion_format = load_stable_diffusion_format
use_safetensors = args.use_safetensors
else:
save_stable_diffusion_format = args.save_model_as.lower() == "ckpt" or args.save_model_as.lower() == "safetensors"
use_safetensors = args.use_safetensors or ("safetensors" in args.save_model_as.lower())
# モデルに xformers とか memory efficient attention を組み込む
train_util.replace_unet_modules(unet, args.mem_eff_attn, args.xformers)
# 学習を準備する
if cache_latents:
vae.to(accelerator.device, dtype=weight_dtype)
vae.requires_grad_(False)
vae.eval()
with torch.no_grad():
train_dataset_group.cache_latents(vae, args.vae_batch_size, args.cache_latents_to_disk, accelerator.is_main_process)
vae.to("cpu")
if torch.cuda.is_available():
torch.cuda.empty_cache()
gc.collect()
accelerator.wait_for_everyone()
# 学習を準備する:モデルを適切な状態にする
train_text_encoder = args.stop_text_encoder_training is None or args.stop_text_encoder_training >= 0
unet.requires_grad_(True) # 念のため追加
text_encoder.requires_grad_(train_text_encoder)
if not train_text_encoder:
print("Text Encoder is not trained.")
if args.gradient_checkpointing:
unet.enable_gradient_checkpointing()
text_encoder.gradient_checkpointing_enable()
if not cache_latents:
vae.requires_grad_(False)
vae.eval()
vae.to(accelerator.device, dtype=weight_dtype)
# 学習に必要なクラスを準備する
print("prepare optimizer, data loader etc.")
if train_text_encoder:
trainable_params = itertools.chain(unet.parameters(), text_encoder.parameters())
else:
trainable_params = unet.parameters()
_, _, optimizer = train_util.get_optimizer(args, trainable_params)
# dataloaderを準備する
# DataLoaderのプロセス数:0はメインプロセスになる
n_workers = min(args.max_data_loader_n_workers, os.cpu_count() - 1) # cpu_count-1 ただし最大で指定された数まで
train_dataloader = torch.utils.data.DataLoader(
train_dataset_group,
batch_size=1,
shuffle=True,
collate_fn=collater,
num_workers=n_workers,
persistent_workers=args.persistent_data_loader_workers,
)
# 学習ステップ数を計算する
if args.max_train_epochs is not None:
args.max_train_steps = args.max_train_epochs * math.ceil(
len(train_dataloader) / accelerator.num_processes / args.gradient_accumulation_steps
)
print(f"override steps. steps for {args.max_train_epochs} epochs is / 指定エポックまでのステップ数: {args.max_train_steps}")
# データセット側にも学習ステップを送信
train_dataset_group.set_max_train_steps(args.max_train_steps)
if args.stop_text_encoder_training is None:
args.stop_text_encoder_training = args.max_train_steps + 1 # do not stop until end
# lr schedulerを用意する TODO gradient_accumulation_stepsの扱いが何かおかしいかもしれない。後で確認する
lr_scheduler = train_util.get_scheduler_fix(args, optimizer, accelerator.num_processes)
# 実験的機能:勾配も含めたfp16学習を行う モデル全体をfp16にする
if args.full_fp16:
assert (
args.mixed_precision == "fp16"
), "full_fp16 requires mixed precision='fp16' / full_fp16を使う場合はmixed_precision='fp16'を指定してください。"
print("enable full fp16 training.")
unet.to(weight_dtype)
text_encoder.to(weight_dtype)
# acceleratorがなんかよろしくやってくれるらしい
if train_text_encoder:
unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
unet, text_encoder, optimizer, train_dataloader, lr_scheduler
)
else:
unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(unet, optimizer, train_dataloader, lr_scheduler)
# transform DDP after prepare
text_encoder, unet = train_util.transform_if_model_is_DDP(text_encoder, unet)
if not train_text_encoder:
text_encoder.to(accelerator.device, dtype=weight_dtype) # to avoid 'cpu' vs 'cuda' error
# 実験的機能:勾配も含めたfp16学習を行う PyTorchにパッチを当ててfp16でのgrad scaleを有効にする
if args.full_fp16:
train_util.patch_accelerator_for_fp16_training(accelerator)
# resumeする
train_util.resume_from_local_or_hf_if_specified(accelerator, args)
# epoch数を計算する
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
if (args.save_n_epoch_ratio is not None) and (args.save_n_epoch_ratio > 0):
args.save_every_n_epochs = math.floor(num_train_epochs / args.save_n_epoch_ratio) or 1
# 学習する
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
print("running training / 学習開始")
print(f" num train images * repeats / 学習画像の数×繰り返し回数: {train_dataset_group.num_train_images}")
print(f" num reg images / 正則化画像の数: {train_dataset_group.num_reg_images}")
print(f" num batches per epoch / 1epochのバッチ数: {len(train_dataloader)}")
print(f" num epochs / epoch数: {num_train_epochs}")
print(f" batch size per device / バッチサイズ: {args.train_batch_size}")
print(f" total train batch size (with parallel & distributed & accumulation) / 総バッチサイズ(並列学習、勾配合計含む): {total_batch_size}")
print(f" gradient ccumulation steps / 勾配を合計するステップ数 = {args.gradient_accumulation_steps}")
print(f" total optimization steps / 学習ステップ数: {args.max_train_steps}")
progress_bar = tqdm(range(args.max_train_steps), smoothing=0, disable=not accelerator.is_local_main_process, desc="steps")
global_step = 0
noise_scheduler = DDPMScheduler(
beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000, clip_sample=False
)
prepare_scheduler_for_custom_training(noise_scheduler, accelerator.device)
if accelerator.is_main_process:
accelerator.init_trackers("dreambooth" if args.log_tracker_name is None else args.log_tracker_name)
loss_list = []
loss_total = 0.0
for epoch in range(num_train_epochs):
print(f"\nepoch {epoch+1}/{num_train_epochs}")
current_epoch.value = epoch + 1
# 指定したステップ数までText Encoderを学習する:epoch最初の状態
unet.train()
# train==True is required to enable gradient_checkpointing
if args.gradient_checkpointing or global_step < args.stop_text_encoder_training:
text_encoder.train()
for step, batch in enumerate(train_dataloader):
current_step.value = global_step
# 指定したステップ数でText Encoderの学習を止める
if global_step == args.stop_text_encoder_training:
print(f"stop text encoder training at step {global_step}")
if not args.gradient_checkpointing:
text_encoder.train(False)
text_encoder.requires_grad_(False)
with accelerator.accumulate(unet):
with torch.no_grad():
# latentに変換
if cache_latents:
latents = batch["latents"].to(accelerator.device)
else:
latents = vae.encode(batch["images"].to(dtype=weight_dtype)).latent_dist.sample()
latents = latents * 0.18215
b_size = latents.shape[0]
# Sample noise that we'll add to the latents
noise = torch.randn_like(latents, device=latents.device)
if args.noise_offset:
noise = apply_noise_offset(latents, noise, args.noise_offset, args.adaptive_noise_scale)
elif args.multires_noise_iterations: | noise = pyramid_noise_like(noise, latents.device, args.multires_noise_iterations, args.multires_noise_discount) | 5 | 2023-12-30 07:46:35+00:00 | 12k |
Hatins/DEOE | modules/detection.py | [
{
"identifier": "ObjectLabels",
"path": "data/genx_utils/labels.py",
"snippet": "class ObjectLabels(ObjectLabelBase):\n def __init__(self,\n object_labels: th.Tensor,\n input_size_hw: Tuple[int, int]):\n super().__init__(object_labels=object_labels, input_size_h... | from typing import Any, Optional, Tuple, Union, Dict
from warnings import warn
from omegaconf import DictConfig
from pytorch_lightning.utilities.types import STEP_OUTPUT
from data.genx_utils.labels import ObjectLabels
from data.utils.types import DataType, LstmStates, ObjDetOutput, DatasetSamplingMode
from models.detection.yolox.utils.boxes import postprocess
from models.detection.yolox_extension.models.detector import YoloXDetector
from utils.evaluation.prophesee.evaluator import PropheseeEvaluator
from utils.evaluation.prophesee.io.box_loading import to_prophesee
from utils.padding import InputPadderFromShape
from .utils.detection import BackboneFeatureSelector, EventReprSelector, RNNStates, REGStates, Mode, mode_2_string, \
merge_mixed_batches
import numpy as np
import pytorch_lightning as pl
import torch
import torch as th
import torch.distributed as dist
import os
import cv2
import ipdb | 9,370 | center = ((left + right) // 2, (top + bottom) // 2)
if class_id in unseen_classes:
color = (255, 165, 0)
cv2.putText(frame_copy, str(class_id), (center[0], bottom - 1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color)
else:
color = (0, 255, 0)
cv2.rectangle(frame_copy, (left, top), (right, bottom), color, 1)
stacked_image = cv2.hconcat([frame, frame_copy])
save_path = save_dir + '{}.png'.format(index)
cv2.imwrite(save_path, stacked_image)
def concatenate_tensors(self, tensor1, tensor2, order1, order2):
D1 = tensor1.shape[0]
D2 = tensor2.shape[0]
D = D1 + D2
result_shape = (D,) + tensor1.shape[1:]
result = torch.zeros(result_shape, dtype=tensor1.dtype).to(tensor1.device)
for i, idx in enumerate(order1):
result[idx] = tensor1[i]
for i, idx in enumerate(order2):
result[idx] = tensor2[i]
return result
def subtract_lists(self, listA: list, listB: list) -> list:
return [x for x in listA if x not in listB]
def merge_dicts_and_average(self, dicts_list: list):
result_dict = {}
num_dicts = len(dicts_list)
for d in dicts_list:
for key, value in d.items():
if key in result_dict:
result_dict[key] += value
else:
result_dict[key] = value
for key in result_dict:
result_dict[key] /= num_dicts
return result_dict
def training_step(self, batch: Any, batch_idx: int) -> STEP_OUTPUT:
batch = merge_mixed_batches(batch)
data = self.get_data_from_batch(batch)
worker_id = self.get_worker_id_from_batch(batch)
mode = Mode.TRAIN
self.started_training = True
step = self.trainer.global_step
ev_tensor_sequence = data[DataType.EV_REPR]
sparse_obj_labels = data[DataType.OBJLABELS_SEQ]
is_first_sample = data[DataType.IS_FIRST_SAMPLE]
token_mask_sequence = data.get(DataType.TOKEN_MASK, None)
self.mode_2_rnn_states[mode].reset(worker_id=worker_id, indices_or_bool_tensor=is_first_sample)
self.reg_states.reset(worker_id=worker_id, indices_or_bool_tensor=is_first_sample)
sequence_len = len(ev_tensor_sequence)
assert sequence_len > 0
batch_size = len(sparse_obj_labels[0])
if self.mode_2_batch_size[mode] is None:
self.mode_2_batch_size[mode] = batch_size
else:
assert self.mode_2_batch_size[mode] == batch_size
prev_states = self.mode_2_rnn_states[mode].get_states(worker_id=worker_id)
prev_reg = self.reg_states.get_states(worker_id=worker_id)
ev_repr_selector = EventReprSelector()
obj_labels = list()
predictions_list = list()
losses_list = list()
if type(self.training_classes) != list:
self.training_classes = list(self.training_classes.keys())
else:
self.training_classes = self.training_classes
first_valid_flag = True
for tidx in range(sequence_len):
ev_tensors = ev_tensor_sequence[tidx]
ev_tensors = ev_tensors.to(dtype=self.dtype)
ev_tensors = self.input_padder.pad_tensor_ev_repr(ev_tensors)
if token_mask_sequence is not None:
token_masks = self.input_padder.pad_token_mask(token_mask=token_mask_sequence[tidx])
else:
token_masks = None
if self.mode_2_hw[mode] is None:
self.mode_2_hw[mode] = tuple(ev_tensors.shape[-2:])
else:
assert self.mode_2_hw[mode] == ev_tensors.shape[-2:]
backbone_features, states = self.mdl.forward_backbone(x=ev_tensors,
previous_states=prev_states,
token_mask=token_masks)
prev_states = states
current_labels, valid_batch_indices = sparse_obj_labels[tidx].get_valid_labels_and_batch_indices()
inference_valid = self.subtract_lists(list(range(batch_size)), valid_batch_indices)
#get the feature
if len(current_labels) > 0:
backbone_feature_selector = BackboneFeatureSelector()
backbone_feature_selector.add_backbone_features(backbone_features=backbone_features,
selected_indices=valid_batch_indices)
selected_backbone_features = backbone_feature_selector.get_batched_backbone_features()
#get the label
|
def remove_elements(ori_items, moving_items):
return [elem for elem in ori_items if elem not in moving_items]
class Module(pl.LightningModule):
def __init__(self, full_config: DictConfig):
super().__init__()
self.full_config = full_config
self.mdl_config = full_config.model
in_res_hw = tuple(self.mdl_config.backbone.in_res_hw)
self.input_padder = InputPadderFromShape(desired_hw=in_res_hw)
self.mdl = YoloXDetector(self.mdl_config)
self.mode_2_rnn_states: Dict[Mode, RNNStates] = {
Mode.TRAIN: RNNStates(),
Mode.VAL: RNNStates(),
Mode.TEST: RNNStates(),
}
self.reg_states = REGStates()
def setup(self, stage: Optional[str] = None) -> None:
dataset_name = self.full_config.dataset.name
self.mode_2_hw: Dict[Mode, Optional[Tuple[int, int]]] = {}
self.mode_2_batch_size: Dict[Mode, Optional[int]] = {}
self.mode_2_psee_evaluator: Dict[Mode, Optional[PropheseeEvaluator]] = {}
self.mode_2_sampling_mode: Dict[Mode, DatasetSamplingMode] = {}
self.started_training = True
dataset_train_sampling = self.full_config.dataset.train.sampling
dataset_eval_sampling = self.full_config.dataset.eval.sampling
assert dataset_train_sampling in iter(DatasetSamplingMode)
assert dataset_eval_sampling in (DatasetSamplingMode.STREAM, DatasetSamplingMode.RANDOM)
if stage == 'fit': # train + val
self.training_classes = self.full_config.dataset.training_classes
self.unseen_classes = self.full_config.dataset.unseen_classes
self.testing_classes = self.full_config.dataset.testing_classes
self.train_config = self.full_config.training
self.train_metrics_config = self.full_config.logging.train.metrics
if self.train_metrics_config.compute:
self.mode_2_psee_evaluator[Mode.TRAIN] = PropheseeEvaluator(
dataset=dataset_name, downsample_by_2=self.full_config.dataset.downsample_by_factor_2)
#We set two evaluator, one (0) for unseen classes and one (1) for all classes
self.mode_2_psee_evaluator[Mode.VAL] = [PropheseeEvaluator(
dataset=dataset_name, downsample_by_2=self.full_config.dataset.downsample_by_factor_2),
PropheseeEvaluator(
dataset=dataset_name, downsample_by_2=self.full_config.dataset.downsample_by_factor_2)
]
self.mode_2_sampling_mode[Mode.TRAIN] = dataset_train_sampling
self.mode_2_sampling_mode[Mode.VAL] = dataset_eval_sampling
for mode in (Mode.TRAIN, Mode.VAL):
self.mode_2_hw[mode] = None
self.mode_2_batch_size[mode] = None
self.started_training = False
elif stage == 'validate':
self.unseen_classes = self.full_config.dataset.unseen_classes
self.testing_classes = self.full_config.dataset.testing_classes
mode = Mode.VAL
self.mode_2_psee_evaluator[mode] = [PropheseeEvaluator(
dataset=dataset_name, downsample_by_2=self.full_config.dataset.downsample_by_factor_2),
PropheseeEvaluator(
dataset=dataset_name, downsample_by_2=self.full_config.dataset.downsample_by_factor_2)
]
self.mode_2_sampling_mode[Mode.VAL] = dataset_eval_sampling
self.mode_2_hw[mode] = None
self.mode_2_batch_size[mode] = None
elif stage == 'test':
mode = Mode.TEST
self.unseen_classes = self.full_config.dataset.unseen_classes
self.testing_classes = self.full_config.dataset.testing_classes
self.mode_2_psee_evaluator[Mode.TEST] = [PropheseeEvaluator(
dataset=dataset_name, downsample_by_2=self.full_config.dataset.downsample_by_factor_2),
PropheseeEvaluator(
dataset=dataset_name, downsample_by_2=self.full_config.dataset.downsample_by_factor_2)
]
self.mode_2_sampling_mode[Mode.TEST] = dataset_eval_sampling
self.mode_2_hw[mode] = None
self.mode_2_batch_size[mode] = None
else:
raise NotImplementedError
def forward(self,
event_tensor: th.Tensor,
previous_states: Optional[LstmStates] = None,
retrieve_detections: bool = True,
targets=None) \
-> Tuple[Union[th.Tensor, None], Union[Dict[str, th.Tensor], None], LstmStates]:
return self.mdl(x=event_tensor,
previous_states=previous_states,
retrieve_detections=retrieve_detections,
targets=targets)
def get_worker_id_from_batch(self, batch: Any) -> int:
return batch['worker_id']
def get_data_from_batch(self, batch: Any):
return batch['data']
def vis_and_save_image(self, ev_pr, label, pred, unseen_classes,
save_dir = '/home/zht/python_project/RVT_CAOD_v9/save_img/', threshold = 0.3, topn = 10):
files = os.listdir(save_dir)
index = len(files)
ev_pr = ev_pr.to('cpu')
assert ev_pr.shape[0] % 2 == 0
num_bins = int(ev_pr.shape[0] / 2)
height = int(ev_pr.shape[1])
width = int(ev_pr.shape[2])
ev_pr = ev_pr.permute(1, 2, 0)
ev_pr = ev_pr.numpy()
frame = np.zeros((height, width, 3), dtype=np.uint8)
for i in range(num_bins):
pos_image = (ev_pr[:, :, i + num_bins]).astype(np.uint8)
neg_image = (ev_pr[:, :, i]).astype(np.uint8)
pos_image = cv2.equalizeHist(pos_image)
neg_image = cv2.equalizeHist(neg_image)
image = np.concatenate((neg_image[..., None], np.zeros((height, width, 1), dtype=np.uint8), pos_image[..., None]), axis=-1)
frame = np.add(frame, image)
frame = frame * 255.0
frame_copy = frame.copy()
# topn = label.shape[0]
fix_num_threshold = np.partition(pred['class_confidence'], -topn)[-topn]
if fix_num_threshold > threshold:
pass
else:
threshold = fix_num_threshold
mask = pred['class_confidence'] > threshold
pred = pred[mask]
for item in pred:
x, y, w, h = item['x'], item['y'], item['w'], item['h']
left = int(x)
top = int(y)
right = int(x + w)
bottom = int(y + h)
cv2.rectangle(frame, (left, top), (right, bottom), (255, 250, 250), 1)
for item in label:
x, y, w, h = item['x'], item['y'], item['w'], item['h']
class_id = item['class_id']
left = int(x)
top = int(y)
right = int(x + w)
bottom = int(y + h)
center = ((left + right) // 2, (top + bottom) // 2)
if class_id in unseen_classes:
color = (255, 165, 0)
cv2.putText(frame_copy, str(class_id), (center[0], bottom - 1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color)
else:
color = (0, 255, 0)
cv2.rectangle(frame_copy, (left, top), (right, bottom), color, 1)
stacked_image = cv2.hconcat([frame, frame_copy])
save_path = save_dir + '{}.png'.format(index)
cv2.imwrite(save_path, stacked_image)
def concatenate_tensors(self, tensor1, tensor2, order1, order2):
D1 = tensor1.shape[0]
D2 = tensor2.shape[0]
D = D1 + D2
result_shape = (D,) + tensor1.shape[1:]
result = torch.zeros(result_shape, dtype=tensor1.dtype).to(tensor1.device)
for i, idx in enumerate(order1):
result[idx] = tensor1[i]
for i, idx in enumerate(order2):
result[idx] = tensor2[i]
return result
def subtract_lists(self, listA: list, listB: list) -> list:
return [x for x in listA if x not in listB]
def merge_dicts_and_average(self, dicts_list: list):
result_dict = {}
num_dicts = len(dicts_list)
for d in dicts_list:
for key, value in d.items():
if key in result_dict:
result_dict[key] += value
else:
result_dict[key] = value
for key in result_dict:
result_dict[key] /= num_dicts
return result_dict
def training_step(self, batch: Any, batch_idx: int) -> STEP_OUTPUT:
batch = merge_mixed_batches(batch)
data = self.get_data_from_batch(batch)
worker_id = self.get_worker_id_from_batch(batch)
mode = Mode.TRAIN
self.started_training = True
step = self.trainer.global_step
ev_tensor_sequence = data[DataType.EV_REPR]
sparse_obj_labels = data[DataType.OBJLABELS_SEQ]
is_first_sample = data[DataType.IS_FIRST_SAMPLE]
token_mask_sequence = data.get(DataType.TOKEN_MASK, None)
self.mode_2_rnn_states[mode].reset(worker_id=worker_id, indices_or_bool_tensor=is_first_sample)
self.reg_states.reset(worker_id=worker_id, indices_or_bool_tensor=is_first_sample)
sequence_len = len(ev_tensor_sequence)
assert sequence_len > 0
batch_size = len(sparse_obj_labels[0])
if self.mode_2_batch_size[mode] is None:
self.mode_2_batch_size[mode] = batch_size
else:
assert self.mode_2_batch_size[mode] == batch_size
prev_states = self.mode_2_rnn_states[mode].get_states(worker_id=worker_id)
prev_reg = self.reg_states.get_states(worker_id=worker_id)
ev_repr_selector = EventReprSelector()
obj_labels = list()
predictions_list = list()
losses_list = list()
if type(self.training_classes) != list:
self.training_classes = list(self.training_classes.keys())
else:
self.training_classes = self.training_classes
first_valid_flag = True
for tidx in range(sequence_len):
ev_tensors = ev_tensor_sequence[tidx]
ev_tensors = ev_tensors.to(dtype=self.dtype)
ev_tensors = self.input_padder.pad_tensor_ev_repr(ev_tensors)
if token_mask_sequence is not None:
token_masks = self.input_padder.pad_token_mask(token_mask=token_mask_sequence[tidx])
else:
token_masks = None
if self.mode_2_hw[mode] is None:
self.mode_2_hw[mode] = tuple(ev_tensors.shape[-2:])
else:
assert self.mode_2_hw[mode] == ev_tensors.shape[-2:]
backbone_features, states = self.mdl.forward_backbone(x=ev_tensors,
previous_states=prev_states,
token_mask=token_masks)
prev_states = states
current_labels, valid_batch_indices = sparse_obj_labels[tidx].get_valid_labels_and_batch_indices()
inference_valid = self.subtract_lists(list(range(batch_size)), valid_batch_indices)
#get the feature
if len(current_labels) > 0:
backbone_feature_selector = BackboneFeatureSelector()
backbone_feature_selector.add_backbone_features(backbone_features=backbone_features,
selected_indices=valid_batch_indices)
selected_backbone_features = backbone_feature_selector.get_batched_backbone_features()
#get the label | labels_yolox = ObjectLabels.get_labels_as_batched_tensor(obj_label_list=current_labels, training_classes = self.training_classes,format_='yolox') | 0 | 2023-12-29 04:04:34+00:00 | 12k |
Wangyuhao06/2022-adhoc | main.py | [
{
"identifier": "Environment",
"path": "src/env.py",
"snippet": "class Environment():\n #初始化环境\n def __init__(self):\n #初始数据-最大节点数\n self.node_max=NODE_MAX\n self.node_space_size=NODE_MAX\n self.node_moving_area=MOV_AREA\n #初始化二维平面\n self.geo_area = rando... | from src.env import Environment
from src.node import Node
from src.packet import Packet
from src.transtask import Trans_task
from src.DGN import DGN,DPG
from src.parameter import *
from src.buffereplay import ReplayBuffer
from queue import Queue
import math
import os
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.autograd as autograd
import torch.nn.functional as F | 9,346 | os.environ['CUDA_VISIBLE_DEVICES'] = '1'
# import matplotlib.pyplot as plt
USE_CUDA = torch.cuda.is_available()
print(USE_CUDA)
env=Environment()
| os.environ['CUDA_VISIBLE_DEVICES'] = '1'
# import matplotlib.pyplot as plt
USE_CUDA = torch.cuda.is_available()
print(USE_CUDA)
env=Environment() | buff=ReplayBuffer(BUFFERSIZE) | 6 | 2023-12-30 09:35:30+00:00 | 12k |
alshubati99/BeamEye | uiElements/uiHandler 3.py | [
{
"identifier": "TkinterVideo",
"path": "uiElements/tkVideoPlayer.py",
"snippet": "class TkinterVideo(tk.Label):\n\n\tdef __init__(self, master, scaled: bool = True, consistant_frame_rate: bool = True, keep_aspect: bool = False,\n\t\t\t\t *args, **kwargs):\n\t\tsuper(TkinterVideo, self).__init__(master,... | import os.path
import shutil
import tkinter as tk
import customtkinter as ctk
import threading
import cv2
import uiElements.sharedVariables as User
import uiElements.sharedVariables as User
import uiElements.sharedVariables as User
import uiElements.sharedVariables as User
import uiElements.sharedVariables as User
import uiElements.sharedVariables as User
import uiElements.sharedVariables as User
import uiElements.sharedVariables as User
import uiElements.sharedVariables as User
from tkinter import PhotoImage, filedialog, messagebox
from uiElements.tkVideoPlayer import TkinterVideo
from uiElements.SettingsWindow import open_settings_window, settings_inherit_root
from time import sleep
from pathlib import Path
from shutil import move
from PIL import Image, ImageTk | 8,115 | # getting user screen size, change values to test different screen sizes
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
# define window size percentage, max is 1 == screen size
resize_ratio = .75
# setting window size 75% screen size
width, height = int(screen_width * resize_ratio), int((screen_width * resize_ratio) * 9 / 16)
# keeping 16:9 aspect ratio to match videos' placeholders
root.geometry(f"{int(screen_width * resize_ratio)}x{int((screen_width * resize_ratio) * 9 / 16)}")
# disable resizing
root.resizable(False, False)
root.configure(bg="black")
# root.geometry(f"{width}x{height}")
pc = "#30A8E6"
ended = False
crowd_is_included = None
progressbar, progressbar_progress, progressbar_placeholder_label = ctk.CTkProgressBar, 0, tk.Label
current_loading_canvas, current_video_canvas = tk.Canvas, tk.Canvas
# background_image_hello = PhotoImage(file=uiAssets + 'home2.png')
pedestrian_count_second, crowd_count_second = [], []
new_video = False
def set_aspect_ratio():
s_width = root.winfo_screenwidth()
s_height = root.winfo_screenheight()
# Initial aspect ratio adjustment
new_width = root.winfo_width()
new_height = int(new_width * 9 / 16)
# If height exceeds screen, adjust width based on screen height
if new_height > s_height:
new_height = s_height
new_width = int(new_height * 16 / 9)
# If width now exceeds screen, reduce both to fit within screen
if new_width > s_width:
new_width = s_width
new_height = int(new_width * 9 / 16)
# Apply the new dimensions
root.geometry(f"{new_width}x{new_height}")
def new_coordinates(old_x, old_y, old_width=None, old_height=None):
window_width, window_height = root.winfo_width(), root.winfo_height()
new_x = old_x * window_width / 1300
new_y = old_y * window_height / 750
if old_width is not None:
new_width = old_width * window_width / 1300
new_height = old_height * window_width / 750
return new_x, new_y, new_width, new_height
return new_x, new_y
def open_hello_window():
global current_canvas, main_root, w, h
# upload canvas
img_ = Image.open(uiAssets + 'home2.png')
resized_image_ = img_.resize((root.winfo_width(), root.winfo_height()))
tk_image_ = ImageTk.PhotoImage(resized_image_)
background_image_hello = tk_image_
hello_canvas = tk.Canvas(root, width=root.winfo_width() - 4, height=root.winfo_width() - 10)
current_canvas = hello_canvas
hello_canvas.place(x=0, y=0)
hello_canvas.create_image(root.winfo_width() / 2, root.winfo_height() / 2, image=background_image_hello, anchor="c")
# settings in upload window
progressbar_placeholder = ctk.CTkProgressBar(master=hello_canvas, height=20,
width=400, bg_color="#041632", fg_color="#041632",
progress_color="#30A8E6", border_color="#30A8E6",
border_width=2, indeterminate_speed=0.01, mode='determinate'
)
progressbar_placeholder.place(x=root.winfo_width() / 2 - 200, y=root.winfo_height() / 2 + 60)
progressbar_placeholder.set(0)
# settings canvas
def wait_for_tenserflow_import():
sleep(1)
for _ in range(7): # takes around 7 seconds to import tensorflow
for __ in range(7): # each .step() increases the bar by 2%, 7x7x2 = 98% of the bar after 7 seconds
progressbar_placeholder.step()
sleep(1 / 7)
progressbar_placeholder.set(1) # set the bar to 100%
sleep(1)
hello_canvas.destroy()
return
threading.Thread(target=wait_for_tenserflow_import).start()
def seconds_to_hhmmss(seconds):
hours, remainder = divmod(seconds, 3600)
minutes, seconds = divmod(remainder, 60)
return "{:02d}:{:02d}:{:02d}".format(int(hours), int(minutes), int(seconds))
def update_current_timestamp(stamp: ctk.CTkLabel, timestamp: str): # ,
stamp.configure(text=timestamp)
pass
video_end = False
current_canvas = None
main_root, w, h = None, 0, 0
def open_video_window():
global root, current_video_canvas
# threading.Thread(target=open_hello_window).start()
video_canvas = tk.Canvas(root, bg="#051735")
|
input_video_path = ""
thread_crowd, thread_people, threads_started = threading.Thread, threading.Thread, False
current_pd_number_color, current_crowd_number_color = None, None
parent = Path(__file__).resolve().parent
# if called from uiHandler will return uiElements
# if called from BeamEye.py will return GP
# we need GP//uiAssets path for ui assets
# following block is to get path to folder of the app (GP), whatever its (new) name is
# and add \\uiuAssets\\ to it
# if the parent folder isn't GP ==> a sub-folder of GP
while not os.path.isdir(str(parent) + '\\uiAssets\\'):
# go back to its parent
parent = parent.parent
GP_path = parent
uiAssets = str(GP_path) + '\\uiAssets\\'
root = tk.Tk()
root.title("BeamEye")
root.iconbitmap(uiAssets + "logo.ico")
# UI has too many elements to control during resizing, especially during video
# playback, we get screen size and base the app window on a smaller area
# before resizing is disabled.
# getting user screen size, change values to test different screen sizes
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
# define window size percentage, max is 1 == screen size
resize_ratio = .75
# setting window size 75% screen size
width, height = int(screen_width * resize_ratio), int((screen_width * resize_ratio) * 9 / 16)
# keeping 16:9 aspect ratio to match videos' placeholders
root.geometry(f"{int(screen_width * resize_ratio)}x{int((screen_width * resize_ratio) * 9 / 16)}")
# disable resizing
root.resizable(False, False)
root.configure(bg="black")
# root.geometry(f"{width}x{height}")
pc = "#30A8E6"
ended = False
crowd_is_included = None
progressbar, progressbar_progress, progressbar_placeholder_label = ctk.CTkProgressBar, 0, tk.Label
current_loading_canvas, current_video_canvas = tk.Canvas, tk.Canvas
# background_image_hello = PhotoImage(file=uiAssets + 'home2.png')
pedestrian_count_second, crowd_count_second = [], []
new_video = False
def set_aspect_ratio():
s_width = root.winfo_screenwidth()
s_height = root.winfo_screenheight()
# Initial aspect ratio adjustment
new_width = root.winfo_width()
new_height = int(new_width * 9 / 16)
# If height exceeds screen, adjust width based on screen height
if new_height > s_height:
new_height = s_height
new_width = int(new_height * 16 / 9)
# If width now exceeds screen, reduce both to fit within screen
if new_width > s_width:
new_width = s_width
new_height = int(new_width * 9 / 16)
# Apply the new dimensions
root.geometry(f"{new_width}x{new_height}")
def new_coordinates(old_x, old_y, old_width=None, old_height=None):
window_width, window_height = root.winfo_width(), root.winfo_height()
new_x = old_x * window_width / 1300
new_y = old_y * window_height / 750
if old_width is not None:
new_width = old_width * window_width / 1300
new_height = old_height * window_width / 750
return new_x, new_y, new_width, new_height
return new_x, new_y
def open_hello_window():
global current_canvas, main_root, w, h
# upload canvas
img_ = Image.open(uiAssets + 'home2.png')
resized_image_ = img_.resize((root.winfo_width(), root.winfo_height()))
tk_image_ = ImageTk.PhotoImage(resized_image_)
background_image_hello = tk_image_
hello_canvas = tk.Canvas(root, width=root.winfo_width() - 4, height=root.winfo_width() - 10)
current_canvas = hello_canvas
hello_canvas.place(x=0, y=0)
hello_canvas.create_image(root.winfo_width() / 2, root.winfo_height() / 2, image=background_image_hello, anchor="c")
# settings in upload window
progressbar_placeholder = ctk.CTkProgressBar(master=hello_canvas, height=20,
width=400, bg_color="#041632", fg_color="#041632",
progress_color="#30A8E6", border_color="#30A8E6",
border_width=2, indeterminate_speed=0.01, mode='determinate'
)
progressbar_placeholder.place(x=root.winfo_width() / 2 - 200, y=root.winfo_height() / 2 + 60)
progressbar_placeholder.set(0)
# settings canvas
def wait_for_tenserflow_import():
sleep(1)
for _ in range(7): # takes around 7 seconds to import tensorflow
for __ in range(7): # each .step() increases the bar by 2%, 7x7x2 = 98% of the bar after 7 seconds
progressbar_placeholder.step()
sleep(1 / 7)
progressbar_placeholder.set(1) # set the bar to 100%
sleep(1)
hello_canvas.destroy()
return
threading.Thread(target=wait_for_tenserflow_import).start()
def seconds_to_hhmmss(seconds):
hours, remainder = divmod(seconds, 3600)
minutes, seconds = divmod(remainder, 60)
return "{:02d}:{:02d}:{:02d}".format(int(hours), int(minutes), int(seconds))
def update_current_timestamp(stamp: ctk.CTkLabel, timestamp: str): # ,
stamp.configure(text=timestamp)
pass
video_end = False
current_canvas = None
main_root, w, h = None, 0, 0
def open_video_window():
global root, current_video_canvas
# threading.Thread(target=open_hello_window).start()
video_canvas = tk.Canvas(root, bg="#051735") | settings_inherit_root(root) | 2 | 2023-12-26 18:39:25+00:00 | 12k |
camenduru/MotionCtrl-hf | app.py | [
{
"identifier": "CAMERA_MOTION_MODE",
"path": "gradio_utils/camera_utils.py",
"snippet": "CAMERA_MOTION_MODE = [\"Basic Camera Poses\", \"Provided Complex Camera Poses\", \"Custom Camera Poses\"]"
},
{
"identifier": "process_camera",
"path": "gradio_utils/camera_utils.py",
"snippet": "de... | import argparse
import os
import tempfile
import cv2
import gradio as gr
import imageio
import numpy as np
import torch
import torchvision
from functools import partial
from omegaconf import OmegaConf
from PIL import Image
from pytorch_lightning import seed_everything
from gradio_utils.camera_utils import CAMERA_MOTION_MODE, process_camera
from gradio_utils.traj_utils import (OBJECT_MOTION_MODE, get_provided_traj,
process_points, process_traj)
from gradio_utils.utils import vis_camera
from lvdm.models.samplers.ddim import DDIMSampler
from main.evaluation.motionctrl_inference import (DEFAULT_NEGATIVE_PROMPT,
load_model_checkpoint,
post_prompt)
from utils.utils import instantiate_from_config | 8,205 | camera_dict['speed'] = camera_speed
return display_camera_info(camera_dict)
def reset_camera():
global camera_dict
camera_dict = {
"motion":[],
"mode": "Customized Mode 1: First A then B",
"speed": 1.0,
"complex": None
}
return display_camera_info(camera_dict)
def fn_traj_droplast():
global traj_list
if traj_list:
traj_list.pop()
if traj_list:
traj_str = [f"{traj}" for traj in traj_list]
return ", ".join(traj_str)
else:
return "Click to specify trajectory"
def fn_traj_reset():
global traj_list
traj_list = []
return "Click to specify trajectory"
###########################################
model_path='./motionctrl.pth?download=true'
config_path='./configs/inference/config_both.yaml'
if not os.path.exists(model_path):
os.system(f'wget https://huggingface.co/TencentARC/MotionCtrl/resolve/main/motionctrl.pth?download=true -P .')
config = OmegaConf.load(config_path)
model_config = config.pop("model", OmegaConf.create())
model = instantiate_from_config(model_config)
if torch.cuda.is_available():
model = model.cuda()
model = load_model_checkpoint(model, model_path)
model.eval()
def model_run(prompts, infer_mode, seed, n_samples):
global traj_list
global camera_dict
RT = process_camera(camera_dict).reshape(-1,12)
traj_flow = process_traj(traj_list).transpose(3,0,1,2)
print(prompts)
print(RT.shape)
print(traj_flow.shape)
noise_shape = [1, 4, 16, 32, 32]
unconditional_guidance_scale = 7.5
unconditional_guidance_scale_temporal = None
# n_samples = 1
ddim_steps= 50
ddim_eta=1.0
cond_T=800
if n_samples < 1:
n_samples = 1
if n_samples > 4:
n_samples = 4
seed_everything(seed)
if infer_mode == MODE[0]:
camera_poses = RT
camera_poses = torch.tensor(camera_poses).float()
camera_poses = camera_poses.unsqueeze(0)
trajs = None
if torch.cuda.is_available():
camera_poses = camera_poses.cuda()
elif infer_mode == MODE[1]:
trajs = traj_flow
trajs = torch.tensor(trajs).float()
trajs = trajs.unsqueeze(0)
camera_poses = None
if torch.cuda.is_available():
trajs = trajs.cuda()
else:
camera_poses = RT
trajs = traj_flow
camera_poses = torch.tensor(camera_poses).float()
trajs = torch.tensor(trajs).float()
camera_poses = camera_poses.unsqueeze(0)
trajs = trajs.unsqueeze(0)
if torch.cuda.is_available():
camera_poses = camera_poses.cuda()
trajs = trajs.cuda()
ddim_sampler = DDIMSampler(model)
batch_size = noise_shape[0]
## get condition embeddings (support single prompt only)
if isinstance(prompts, str):
prompts = [prompts]
for i in range(len(prompts)):
prompts[i] = f'{prompts[i]}, {post_prompt}'
cond = model.get_learned_conditioning(prompts)
if camera_poses is not None:
RT = camera_poses[..., None]
else:
RT = None
if trajs is not None:
traj_features = model.get_traj_features(trajs)
else:
traj_features = None
if unconditional_guidance_scale != 1.0:
# prompts = batch_size * [""]
|
os.environ['KMP_DUPLICATE_LIB_OK']='True'
#### Description ####
title = r"""<h1 align="center">MotionCtrl: A Unified and Flexible Motion Controller for Video Generation</h1>"""
description = r"""
<b>Official Gradio demo</b> for <a href='https://github.com/TencentARC/MotionCtrl' target='_blank'><b>MotionCtrl: A Unified and Flexible Motion Controller for Video Generation</b></a>.<br>
🔥 MotionCtrl is capable of independently and flexibly controling the camera motion and object motion of a generated video, with only a unified model.<br>
🤗 Try to control the motion of the generated videos yourself!<br>
❗❗❗ Please note that current version of **MotionCtrl** is deployed on **LVDM/VideoCrafter**. The versions that depolyed on **AnimateDiff** and **SVD** will be released soon.<br>
"""
article = r"""
If MotionCtrl is helpful, please help to ⭐ the <a href='https://github.com/TencentARC/MotionCtrl' target='_blank'>Github Repo</a>. Thanks!
[](https://github.com/TencentARC/MotionCtrl)
---
📝 **Citation**
<br>
If our work is useful for your research, please consider citing:
```bibtex
@inproceedings{wang2023motionctrl,
title={MotionCtrl: A Unified and Flexible Motion Controller for Video Generation},
author={Wang, Zhouxia and Yuan, Ziyang and Wang, Xintao and Chen, Tianshui and Xia, Menghan and Luo, Ping and Shan, Yin},
booktitle={arXiv preprint arXiv:2312.03641},
year={2023}
}
```
📧 **Contact**
<br>
If you have any questions, please feel free to reach me out at <b>wzhoux@connect.hku.hk</b>.
"""
css = """
.gradio-container {width: 85% !important}
.gr-monochrome-group {border-radius: 5px !important; border: revert-layer !important; border-width: 2px !important; color: black !important;}
span.svelte-s1r2yt {font-size: 17px !important; font-weight: bold !important; color: #d30f2f !important;}
button {border-radius: 8px !important;}
.add_button {background-color: #4CAF50 !important;}
.remove_button {background-color: #f44336 !important;}
.clear_button {background-color: gray !important;}
.mask_button_group {gap: 10px !important;}
.video {height: 300px !important;}
.image {height: 300px !important;}
.video .wrap.svelte-lcpz3o {display: flex !important; align-items: center !important; justify-content: center !important;}
.video .wrap.svelte-lcpz3o > :first-child {height: 100% !important;}
.margin_center {width: 50% !important; margin: auto !important;}
.jc_center {justify-content: center !important;}
"""
T_base = [
[1.,0.,0.], ## W2C x 的正方向: 相机朝左 left
[-1.,0.,0.], ## W2C x 的负方向: 相机朝右 right
[0., 1., 0.], ## W2C y 的正方向: 相机朝上 up
[0.,-1.,0.], ## W2C y 的负方向: 相机朝下 down
[0.,0.,1.], ## W2C z 的正方向: 相机往前 zoom out
[0.,0.,-1.], ## W2C z 的负方向: 相机往前 zoom in
]
radius = 1
n = 16
# step =
look_at = np.array([0, 0, 0.8]).reshape(3,1)
# look_at = np.array([0, 0, 0.2]).reshape(3,1)
T_list = []
base_R = np.array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
res = []
res_forsave = []
T_range = 1.8
for i in range(0, 16):
# theta = (1)*np.pi*i/n
R = base_R[:,:3]
T = np.array([0.,0.,1.]).reshape(3,1) * (i/n)*2
RT = np.concatenate([R,T], axis=1)
res.append(RT)
fig = vis_camera(res)
# MODE = ["camera motion control", "object motion control", "camera + object motion control"]
MODE = ["control camera poses", "control object trajectory", "control both camera and object motion"]
BASE_MODEL = ['LVDM/VideoCrafter', 'AnimateDiff', 'SVD']
traj_list = []
camera_dict = {
"motion":[],
"mode": "Customized Mode 1: First A then B", # "First A then B", "Both A and B", "Custom"
"speed": 1.0,
"complex": None
}
def fn_vis_camera(info_mode):
    """Plot the configured camera path and toggle the follow-up UI widgets.

    Args:
        info_mode: one of MODE; MODE[0] (camera-only) reveals the generation
            widgets, any other mode reveals the object-trajectory widgets.

    Returns:
        A plotly figure followed by eight gr.update visibility updates.
    """
    global camera_dict
    RT = process_camera(camera_dict) # [t, 3, 4]
    if camera_dict['complex'] is not None:
        # rescale T to [-2,2]
        for i in range(3):
            min_T = np.min(RT[:,i,-1])
            max_T = np.max(RT[:,i,-1])
            if min_T < -2 or max_T > 2:
                RT[:,i,-1] = RT[:,i,-1] - min_T
                # NOTE(review): divides by the max over *all* axes
                # (RT[:,:,-1]) — including axes already shifted in earlier
                # iterations — rather than np.max(RT[:,i,-1]). Looks
                # suspicious; confirm intent before changing.
                RT[:,i,-1] = RT[:,i,-1] / (np.max(RT[:,:,-1]) + 1e-6)
                RT[:,i,-1] = RT[:,i,-1] * 4
                RT[:,i,-1] = RT[:,i,-1] - 2
    fig = vis_camera(RT)
    # Camera-only mode jumps straight to prompt/generation widgets;
    # otherwise expose the object-motion widgets first.
    if info_mode == MODE[0]:
        vis_step3_prompt_generate = True
        vis_prompt = True
        vis_num_samples = True
        vis_seed = True
        vis_start = True
        vis_gen_video = True
        vis_object_mode = False
        vis_object_info = False
    else:
        vis_step3_prompt_generate = False
        vis_prompt = False
        vis_num_samples = False
        vis_seed = False
        vis_start = False
        vis_gen_video = False
        vis_object_mode = True
        vis_object_info = True
    return fig, \
            gr.update(visible=vis_object_mode), \
            gr.update(visible=vis_object_info), \
            gr.update(visible=vis_step3_prompt_generate), \
            gr.update(visible=vis_prompt), \
            gr.update(visible=vis_num_samples), \
            gr.update(visible=vis_seed), \
            gr.update(visible=vis_start), \
            gr.update(visible=vis_gen_video, value=None)
def fn_vis_traj():
    """Render the current object trajectory as a short mp4 preview.

    Draws the full trajectory polyline on a white canvas and highlights the
    point the object occupies in each frame, writes the frames to a temp
    mp4, and reveals the step-3 generation widgets.

    Returns:
        (video_path, *gr.update(...)): the rendered mp4 path followed by
        visibility updates for the prompt/samples/seed/start/video widgets.
    """
    global traj_list
    canvas_size = 1024          # square drawing canvas, pixels (was an unused `xy_range`)
    n_frames = 16               # one frame per resampled trajectory point
    points = process_points(traj_list)
    imgs = []
    for idx in range(n_frames):
        # Fresh white background for every frame.
        bg_img = np.ones((canvas_size, canvas_size, 3), dtype=np.uint8) * 255
        for i in range(len(points) - 1):
            cv2.line(bg_img, points[i], points[i + 1], (255, 0, 0), 2)
            if i == idx:
                # Highlight the active point for this frame.
                cv2.circle(bg_img, points[i], 2, (0, 255, 0), 20)
        if idx == n_frames - 1:
            # The segment loop stops one point early; mark the endpoint here.
            cv2.circle(bg_img, points[-1], 2, (0, 255, 0), 20)
        imgs.append(bg_img.astype(np.uint8))
    fps = 10
    path = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False).name
    writer = imageio.get_writer(path, format='mp4', mode='I', fps=fps)
    for img in imgs:
        writer.append_data(img)
    writer.close()
    # A trajectory now exists, so reveal the generation controls.
    vis_step3_prompt_generate = True
    vis_prompt = True
    vis_num_samples = True
    vis_seed = True
    vis_start = True
    vis_gen_video = True
    return path, gr.update(visible=vis_step3_prompt_generate), \
            gr.update(visible=vis_prompt), \
            gr.update(visible=vis_num_samples), \
            gr.update(visible=vis_seed), \
            gr.update(visible=vis_start), \
            gr.update(visible=vis_gen_video, value=None)
def display_camera_info(camera_dict, camera_mode=None):
    """Format the current camera-control state as a human-readable string.

    Args:
        camera_dict: state dict with 'complex', 'motion', 'speed', 'mode'.
        camera_mode: current UI mode; the combine 'mode' field is only shown
            in CAMERA_MOTION_MODE[2] (combined) mode.

    Returns:
        A single summary string, e.g. "motion : ['Up']. speed : 1.0. ".
    """
    parts = []
    if camera_dict['complex'] is not None:
        parts.append(f"complex : {camera_dict['complex']}. ")
    # list(...) instead of the old `[_ for _ in ...]` copy idiom.
    parts.append(f"motion : {list(camera_dict['motion'])}. ")
    parts.append(f"speed : {camera_dict['speed']}. ")
    # Guard on None first so CAMERA_MOTION_MODE is only touched when a mode
    # was actually supplied.
    if camera_mode is not None and camera_mode == CAMERA_MOTION_MODE[2]:
        parts.append(f"mode : {camera_dict['mode']}. ")
    return "".join(parts)
def add_traj_point(evt: gr.SelectData, ):
    """Append the clicked canvas coordinate to the global trajectory."""
    global traj_list
    traj_list.append(evt.index)
    return ", ".join(str(p) for p in traj_list)
def add_provided_traj(traj_name):
    """Replace the global trajectory with the named preset."""
    global traj_list
    traj_list = get_provided_traj(traj_name)
    return ", ".join(str(p) for p in traj_list)
def add_camera_motion(camera_motion, camera_mode):
    """Record a basic camera motion; combine mode stacks up to two motions."""
    global camera_dict
    # Any previously selected complex pose is discarded.
    camera_dict['complex'] = None
    combining = camera_mode == CAMERA_MOTION_MODE[2]
    if combining and len(camera_dict['motion']) < 2:
        camera_dict['motion'].append(camera_motion)
    else:
        camera_dict['motion'] = [camera_motion]
    return display_camera_info(camera_dict, camera_mode)
def add_complex_camera_motion(camera_motion):
    """Select one of the predefined complex camera pose sequences."""
    global camera_dict
    # The complex pose takes precedence over any basic motions.
    camera_dict['complex'] = camera_motion
    return display_camera_info(camera_dict)
def change_camera_mode(combine_type, camera_mode):
    """Set how two stacked camera motions are combined (A then B / both)."""
    global camera_dict
    camera_dict['mode'] = combine_type
    return display_camera_info(camera_dict, camera_mode)
def change_camera_speed(camera_speed):
    """Update the camera motion speed multiplier."""
    global camera_dict
    camera_dict['speed'] = camera_speed
    return display_camera_info(camera_dict)
def reset_camera():
    """Restore the camera-control state to its defaults."""
    global camera_dict
    camera_dict = dict(
        motion=[],
        mode="Customized Mode 1: First A then B",
        speed=1.0,
        complex=None,
    )
    return display_camera_info(camera_dict)
def fn_traj_droplast():
    """Remove the most recently added trajectory point."""
    global traj_list
    if traj_list:
        traj_list.pop()
    if not traj_list:
        # Nothing left: show the placeholder prompt again.
        return "Click to specify trajectory"
    return ", ".join(str(p) for p in traj_list)
def fn_traj_reset():
    """Discard every trajectory point and show the placeholder prompt."""
    global traj_list
    traj_list = []
    return "Click to specify trajectory"
###########################################
# Model bootstrap: download the MotionCtrl checkpoint on first run, build the
# model from the "both" (camera + object motion) inference config, and load
# the weights in eval mode.
# NOTE(review): wget -P keeps the '?download=true' query string in the saved
# filename, which is why model_path includes it — the two must stay in sync.
model_path='./motionctrl.pth?download=true'
config_path='./configs/inference/config_both.yaml'
if not os.path.exists(model_path):
    os.system(f'wget https://huggingface.co/TencentARC/MotionCtrl/resolve/main/motionctrl.pth?download=true -P .')
config = OmegaConf.load(config_path)
model_config = config.pop("model", OmegaConf.create())
model = instantiate_from_config(model_config)
if torch.cuda.is_available():
    model = model.cuda()
model = load_model_checkpoint(model, model_path)
model.eval()
def model_run(prompts, infer_mode, seed, n_samples):
global traj_list
global camera_dict
RT = process_camera(camera_dict).reshape(-1,12)
traj_flow = process_traj(traj_list).transpose(3,0,1,2)
print(prompts)
print(RT.shape)
print(traj_flow.shape)
noise_shape = [1, 4, 16, 32, 32]
unconditional_guidance_scale = 7.5
unconditional_guidance_scale_temporal = None
# n_samples = 1
ddim_steps= 50
ddim_eta=1.0
cond_T=800
if n_samples < 1:
n_samples = 1
if n_samples > 4:
n_samples = 4
seed_everything(seed)
if infer_mode == MODE[0]:
camera_poses = RT
camera_poses = torch.tensor(camera_poses).float()
camera_poses = camera_poses.unsqueeze(0)
trajs = None
if torch.cuda.is_available():
camera_poses = camera_poses.cuda()
elif infer_mode == MODE[1]:
trajs = traj_flow
trajs = torch.tensor(trajs).float()
trajs = trajs.unsqueeze(0)
camera_poses = None
if torch.cuda.is_available():
trajs = trajs.cuda()
else:
camera_poses = RT
trajs = traj_flow
camera_poses = torch.tensor(camera_poses).float()
trajs = torch.tensor(trajs).float()
camera_poses = camera_poses.unsqueeze(0)
trajs = trajs.unsqueeze(0)
if torch.cuda.is_available():
camera_poses = camera_poses.cuda()
trajs = trajs.cuda()
ddim_sampler = DDIMSampler(model)
batch_size = noise_shape[0]
## get condition embeddings (support single prompt only)
if isinstance(prompts, str):
prompts = [prompts]
for i in range(len(prompts)):
prompts[i] = f'{prompts[i]}, {post_prompt}'
cond = model.get_learned_conditioning(prompts)
if camera_poses is not None:
RT = camera_poses[..., None]
else:
RT = None
if trajs is not None:
traj_features = model.get_traj_features(trajs)
else:
traj_features = None
if unconditional_guidance_scale != 1.0:
# prompts = batch_size * [""] | prompts = batch_size * [DEFAULT_NEGATIVE_PROMPT] | 8 | 2023-12-27 19:32:03+00:00 | 12k |
0x00wolf/hkrsAI | hkrsai.py | [
{
"identifier": "fetch_args",
"path": "src/args.py",
"snippet": "def fetch_args():\n \"\"\"Function to handle command-line arguments\"\"\"\n p = argparse.ArgumentParser(\n formatter_class=argparse.RawTextHelpFormatter,\n prog='hkrsAI.v2',\n description=DESCRIPTION,\n ep... | import sys
import os
import readline
from src.args import fetch_args
from src.pathfinder import PathFinder
from src.client import Client
from src.gpt import GPT
from src.systemprompt import SystemPrompt
from src.conversation import Conversation
from src.action import Action
from src.inputparser import InputParser
from src.dispatcher import Dispatcher
from src.logger import Logger | 7,825 |
HKRSAI = """
1 0 1
1 1 0
0 0 0
hkrsAI.v2
"""
def main():
print(HKRSAI)
args = fetch_args() # command-line arguments
paths = PathFinder(cwd=os.path.dirname(os.path.abspath(__file__)))
parser = InputParser() # Class to parse user input and return Actions.
dispatcher = Dispatcher() # Manages conversation state and turnsActions into functions.
logger = Logger(paths=paths, log_level=args.log_level, log_format=args.log_format)
client = Client(config=paths.config) # OpenAI API client management object
client.initialize() # Checks for valid saved API key or prompts user. Tests keys before proceeding
gpt = GPT( # Class that contains GPTs parameters
client=client.client,
model=args.model,
temperature=args.temperature,
top_p=args.top_p,
n=args.n,
frequency_penalty=args.frequency_penalty,
presence_penalty=args.presence_penalty,
max_tokens=args.max_tokens
)
|
HKRSAI = """
1 0 1
1 1 0
0 0 0
hkrsAI.v2
"""
def main():
print(HKRSAI)
args = fetch_args() # command-line arguments
paths = PathFinder(cwd=os.path.dirname(os.path.abspath(__file__)))
parser = InputParser() # Class to parse user input and return Actions.
dispatcher = Dispatcher() # Manages conversation state and turnsActions into functions.
logger = Logger(paths=paths, log_level=args.log_level, log_format=args.log_format)
client = Client(config=paths.config) # OpenAI API client management object
client.initialize() # Checks for valid saved API key or prompts user. Tests keys before proceeding
gpt = GPT( # Class that contains GPTs parameters
client=client.client,
model=args.model,
temperature=args.temperature,
top_p=args.top_p,
n=args.n,
frequency_penalty=args.frequency_penalty,
presence_penalty=args.presence_penalty,
max_tokens=args.max_tokens
)
| system_prompt = SystemPrompt(prompts_dir=paths.prompts, path=args.system_prompt) | 4 | 2023-12-22 07:04:47+00:00 | 12k |
hughxiouge/CompoundE3D | run.py | [
{
"identifier": "KGEModel",
"path": "model.py",
"snippet": "class KGEModel(nn.Module):\n def __init__(self, model_name, nentity, nrelation, hidden_dim, gamma, evaluator,\n double_entity_embedding=False, \n double_relation_embedding=False, triple_relation_embedding=Fals... | import argparse
import json
import logging
import os
import random
import numpy as np
import torch
import time
import os.path as osp
from torch.utils.data import DataLoader
from model import KGEModel
from dataloader import TrainDataset
from dataloader import BidirectionalOneShotIterator
from ogb.linkproppred import LinkPropPredDataset, Evaluator
from collections import defaultdict
from tqdm import tqdm
from tensorboardX import SummaryWriter | 7,574 | logging.basicConfig(
format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S',
filename=log_file,
filemode='w'
)
if args.print_on_screen:
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
def log_metrics(mode, step, metrics, writer):
    """Log every evaluation metric and mirror it to the TensorBoard writer.

    Args:
        mode: tag prefix, e.g. 'Valid' or 'Test'.
        step: global training step the metrics belong to.
        metrics: mapping of metric name -> float value.
        writer: tensorboardX SummaryWriter (or compatible) instance.
    """
    # .items() avoids re-indexing the dict once per key.
    for metric, value in metrics.items():
        logging.info('%s %s at step %d: %f' % (mode, metric, step, value))
        writer.add_scalar("_".join([mode, metric]), value, step)
def main(args):
if (not args.do_train) and (not args.do_valid) and (not args.do_test) and (not args.evaluate_train):
raise ValueError('one of train/val/test mode must be choosed.')
if args.init_checkpoint:
override_config(args)
args.save_path = 'log/%s/%s/%s-%s/%s'%(args.dataset, args.model, args.hidden_dim, args.gamma, time.time()) if args.save_path == None else args.save_path
writer = SummaryWriter(args.save_path)
# Write logs to checkpoint and console
set_logger(args)
dataset = LinkPropPredDataset(name = args.dataset)
split_dict = dataset.get_edge_split()
nentity = dataset.graph['num_nodes']
nrelation = int(max(dataset.graph['edge_reltype'])[0])+1
evaluator = Evaluator(name = args.dataset)
args.nentity = nentity
args.nrelation = nrelation
logging.info('Model: %s' % args.model)
logging.info('Dataset: %s' % args.dataset)
logging.info('#entity: %d' % nentity)
logging.info('#relation: %d' % nrelation)
train_triples = split_dict['train']
logging.info('#train: %d' % len(train_triples['head']))
valid_triples = split_dict['valid']
logging.info('#valid: %d' % len(valid_triples['head']))
test_triples = split_dict['test']
logging.info('#test: %d' % len(test_triples['head']))
logging.info('relation type %s' % args.relation_type)
print('relation type %s' % args.relation_type)
test_set_file = ''
if args.relation_type == '1-1':
test_set_file = './dataset/ogbl_wikikg/wikikg_P/1-1-id.txt'
test_set_pre_processed = './dataset/ogbl_wikikg/wikikg_P/1-1.pt'
elif args.relation_type == '1-n':
test_set_file = './dataset/ogbl_wikikg/wikikg_P/1-n-id.txt'
test_set_pre_processed = './dataset/ogbl_wikikg/wikikg_P/1-n.pt'
elif args.relation_type == 'n-1':
test_set_file = './dataset/ogbl_wikikg/wikikg_P/n-1-id.txt'
test_set_pre_processed = './dataset/ogbl_wikikg/wikikg_P/n-1.pt'
elif args.relation_type == 'n-n':
test_set_file = './dataset/ogbl_wikikg/wikikg_P/n-n-id.txt'
test_set_pre_processed = './dataset/ogbl_wikikg/wikikg_P/n-n.pt'
if test_set_file != '':
if osp.exists(test_set_pre_processed):
test_triples = torch.load(test_set_pre_processed, 'rb')
print("load pre processed test set")
else:
test_triples_new = {}
test_triples_chosen = []
test_triples_new['head'] = []
test_triples_new['relation'] = []
test_triples_new['tail'] = []
test_triples_new['head_neg'] = []
test_triples_new['tail_neg'] = []
f_test = open(test_set_file, "r")
for line in f_test:
h, r, t = line.strip().split('\t')
h, r, t = int(h), int(r), int(t)
test_triples_chosen.append((h, r, t))
f_test.close()
for idx in range(len(test_triples['head'])):
h, r, t = test_triples['head'][idx], test_triples['relation'][idx], test_triples['tail'][idx]
if (h, r, t) in test_triples_chosen:
test_triples_new['head'].append(h)
test_triples_new['relation'].append(r)
test_triples_new['tail'].append(t)
test_triples_new['head_neg'].append(test_triples['head_neg'][idx])
test_triples_new['tail_neg'].append(test_triples['tail_neg'][idx])
print('Saving ...')
torch.save(test_triples_new, test_set_pre_processed, pickle_protocol=4)
test_triples = test_triples_new
logging.info('#test: %d' % len(test_triples['head']))
train_count, train_true_head, train_true_tail = defaultdict(lambda: 4), defaultdict(list), defaultdict(list)
f_train = open("train.txt", "w")
for i in tqdm(range(len(train_triples['head']))):
head, relation, tail = train_triples['head'][i], train_triples['relation'][i], train_triples['tail'][i]
train_count[(head, relation)] += 1
train_count[(tail, -relation-1)] += 1
train_true_head[(relation, tail)].append(head)
train_true_tail[(head, relation)].append(tail)
f_train.write("\t".join([str(head), str(relation), str(tail)]) + '\n')
f_train.close()
| #!/usr/bin/python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def parse_args(args=None):
    """Build and parse the command-line options for KGE training/evaluation.

    Args:
        args: optional list of argument strings (defaults to sys.argv[1:]).

    Returns:
        argparse.Namespace with all options below.
    """
    # NOTE(review): usage says 'train.py' while this file appears to be
    # run.py — harmless, but confirm and align the usage string.
    parser = argparse.ArgumentParser(
        description='Training and Testing Knowledge Graph Embedding Models',
        usage='train.py [<args>] [-h | --help]'
    )
    # Run-mode flags.
    parser.add_argument('--cuda', action='store_true', help='use GPU')
    parser.add_argument('--do_train', action='store_true')
    parser.add_argument('--do_valid', action='store_true')
    parser.add_argument('--do_test', action='store_true')
    parser.add_argument('--evaluate_train', action='store_true', help='Evaluate on training data')
    # Model selection and embedding layout.
    parser.add_argument('--dataset', type=str, default='ogbl-wikikg2', help='dataset name, default to wikikg')
    parser.add_argument('--model', default='TransE', type=str)
    parser.add_argument('-de', '--double_entity_embedding', action='store_true')
    parser.add_argument('-dr', '--double_relation_embedding', action='store_true')
    parser.add_argument('-tr', '--triple_relation_embedding', action='store_true')
    parser.add_argument('-qr', '--quad_relation_embedding', action='store_true')
    # Training hyper-parameters.
    parser.add_argument('-n', '--negative_sample_size', default=128, type=int)
    parser.add_argument('-d', '--hidden_dim', default=500, type=int)
    parser.add_argument('-g', '--gamma', default=12.0, type=float)
    parser.add_argument('-adv', '--negative_adversarial_sampling', action='store_true')
    parser.add_argument('-a', '--adversarial_temperature', default=1.0, type=float)
    parser.add_argument('-b', '--batch_size', default=1024, type=int)
    parser.add_argument('-r', '--regularization', default=0.0, type=float)
    parser.add_argument('--test_batch_size', default=4, type=int, help='valid/test batch size')
    parser.add_argument('--uni_weight', action='store_true',
                        help='Otherwise use subsampling weighting like in word2vec')
    parser.add_argument('-lr', '--learning_rate', default=0.0001, type=float)
    parser.add_argument('-cpu', '--cpu_num', default=10, type=int)
    # Checkpointing / scheduling.
    parser.add_argument('-init', '--init_checkpoint', default=None, type=str)
    parser.add_argument('-save', '--save_path', default=None, type=str)
    parser.add_argument('--max_steps', default=100000, type=int)
    parser.add_argument('--warm_up_steps', default=None, type=int)
    parser.add_argument('--save_checkpoint_steps', default=10000, type=int)
    parser.add_argument('--valid_steps', default=10000, type=int)
    parser.add_argument('--log_steps', default=100, type=int, help='train log every xx steps')
    parser.add_argument('--test_log_steps', default=1000, type=int, help='valid/test log every xx steps')
    # Filled in automatically from the dataset after loading.
    parser.add_argument('--nentity', type=int, default=0, help='DO NOT MANUALLY SET')
    parser.add_argument('--nrelation', type=int, default=0, help='DO NOT MANUALLY SET')
    parser.add_argument('--print_on_screen', action='store_true', help='log on screen or not')
    parser.add_argument('--ntriples_eval_train', type=int, default=200000, help='number of training triples to evaluate eventually')
    parser.add_argument('--neg_size_eval_train', type=int, default=500, help='number of negative samples when evaluating training triples')
    parser.add_argument('--relation_type', type=str, default='all', help='1-1, 1-n, n-1, n-n')
    return parser.parse_args(args)
def override_config(args):
    """Overwrite model/data options on *args* from the checkpoint's config.json.

    Only architecture- and data-defining fields are restored; optimizer and
    run-mode options keep their command-line values.
    """
    with open(os.path.join(args.init_checkpoint, 'config.json'), 'r') as fjson:
        argparse_dict = json.load(fjson)
    for key in (
        'dataset',
        'model',
        'double_entity_embedding',
        'double_relation_embedding',
        'triple_relation_embedding',
        'quad_relation_embedding',
        'hidden_dim',
        'test_batch_size',
    ):
        setattr(args, key, argparse_dict[key])
def save_model(model, optimizer, save_variable_list, args):
    """Persist the run configuration and learned embeddings to args.save_path.

    Writes config.json (all argparse options) plus entity_embedding.npy and
    relation_embedding.npy.

    NOTE(review): despite the signature, neither `optimizer` state nor
    `save_variable_list` (step / learning-rate) is actually written here —
    presumably trimmed from the upstream implementation; confirm before
    relying on this for checkpoint resume.
    """
    argparse_dict = vars(args)
    with open(os.path.join(args.save_path, 'config.json'), 'w') as fjson:
        json.dump(argparse_dict, fjson)
    # Embeddings are detached to CPU NumPy arrays before saving.
    entity_embedding = model.entity_embedding.detach().cpu().numpy()
    np.save(
        os.path.join(args.save_path, 'entity_embedding'),
        entity_embedding
    )
    relation_embedding = model.relation_embedding.detach().cpu().numpy()
    np.save(
        os.path.join(args.save_path, 'relation_embedding'),
        relation_embedding
    )
def set_logger(args):
    """Route logging to a train/test log file, plus the console if requested."""
    log_dir = args.save_path or args.init_checkpoint
    log_name = 'train.log' if args.do_train else 'test.log'
    log_file = os.path.join(log_dir, log_name)
    print(log_file)
    logging.basicConfig(
        format='%(asctime)s %(levelname)-8s %(message)s',
        level=logging.INFO,
        datefmt='%Y-%m-%d %H:%M:%S',
        filename=log_file,
        filemode='w'
    )
    if args.print_on_screen:
        # Mirror INFO-and-above records to stderr with the same format.
        console = logging.StreamHandler()
        console.setLevel(logging.INFO)
        console.setFormatter(logging.Formatter('%(asctime)s %(levelname)-8s %(message)s'))
        logging.getLogger('').addHandler(console)
def log_metrics(mode, step, metrics, writer):
    """Emit each metric to the log and to the TensorBoard writer."""
    for metric in metrics:
        value = metrics[metric]
        logging.info('%s %s at step %d: %f' % (mode, metric, step, value))
        writer.add_scalar("_".join([mode, metric]), value, step)
def main(args):
if (not args.do_train) and (not args.do_valid) and (not args.do_test) and (not args.evaluate_train):
raise ValueError('one of train/val/test mode must be choosed.')
if args.init_checkpoint:
override_config(args)
args.save_path = 'log/%s/%s/%s-%s/%s'%(args.dataset, args.model, args.hidden_dim, args.gamma, time.time()) if args.save_path == None else args.save_path
writer = SummaryWriter(args.save_path)
# Write logs to checkpoint and console
set_logger(args)
dataset = LinkPropPredDataset(name = args.dataset)
split_dict = dataset.get_edge_split()
nentity = dataset.graph['num_nodes']
nrelation = int(max(dataset.graph['edge_reltype'])[0])+1
evaluator = Evaluator(name = args.dataset)
args.nentity = nentity
args.nrelation = nrelation
logging.info('Model: %s' % args.model)
logging.info('Dataset: %s' % args.dataset)
logging.info('#entity: %d' % nentity)
logging.info('#relation: %d' % nrelation)
train_triples = split_dict['train']
logging.info('#train: %d' % len(train_triples['head']))
valid_triples = split_dict['valid']
logging.info('#valid: %d' % len(valid_triples['head']))
test_triples = split_dict['test']
logging.info('#test: %d' % len(test_triples['head']))
logging.info('relation type %s' % args.relation_type)
print('relation type %s' % args.relation_type)
test_set_file = ''
if args.relation_type == '1-1':
test_set_file = './dataset/ogbl_wikikg/wikikg_P/1-1-id.txt'
test_set_pre_processed = './dataset/ogbl_wikikg/wikikg_P/1-1.pt'
elif args.relation_type == '1-n':
test_set_file = './dataset/ogbl_wikikg/wikikg_P/1-n-id.txt'
test_set_pre_processed = './dataset/ogbl_wikikg/wikikg_P/1-n.pt'
elif args.relation_type == 'n-1':
test_set_file = './dataset/ogbl_wikikg/wikikg_P/n-1-id.txt'
test_set_pre_processed = './dataset/ogbl_wikikg/wikikg_P/n-1.pt'
elif args.relation_type == 'n-n':
test_set_file = './dataset/ogbl_wikikg/wikikg_P/n-n-id.txt'
test_set_pre_processed = './dataset/ogbl_wikikg/wikikg_P/n-n.pt'
if test_set_file != '':
if osp.exists(test_set_pre_processed):
test_triples = torch.load(test_set_pre_processed, 'rb')
print("load pre processed test set")
else:
test_triples_new = {}
test_triples_chosen = []
test_triples_new['head'] = []
test_triples_new['relation'] = []
test_triples_new['tail'] = []
test_triples_new['head_neg'] = []
test_triples_new['tail_neg'] = []
f_test = open(test_set_file, "r")
for line in f_test:
h, r, t = line.strip().split('\t')
h, r, t = int(h), int(r), int(t)
test_triples_chosen.append((h, r, t))
f_test.close()
for idx in range(len(test_triples['head'])):
h, r, t = test_triples['head'][idx], test_triples['relation'][idx], test_triples['tail'][idx]
if (h, r, t) in test_triples_chosen:
test_triples_new['head'].append(h)
test_triples_new['relation'].append(r)
test_triples_new['tail'].append(t)
test_triples_new['head_neg'].append(test_triples['head_neg'][idx])
test_triples_new['tail_neg'].append(test_triples['tail_neg'][idx])
print('Saving ...')
torch.save(test_triples_new, test_set_pre_processed, pickle_protocol=4)
test_triples = test_triples_new
logging.info('#test: %d' % len(test_triples['head']))
train_count, train_true_head, train_true_tail = defaultdict(lambda: 4), defaultdict(list), defaultdict(list)
f_train = open("train.txt", "w")
for i in tqdm(range(len(train_triples['head']))):
head, relation, tail = train_triples['head'][i], train_triples['relation'][i], train_triples['tail'][i]
train_count[(head, relation)] += 1
train_count[(tail, -relation-1)] += 1
train_true_head[(relation, tail)].append(head)
train_true_tail[(head, relation)].append(tail)
f_train.write("\t".join([str(head), str(relation), str(tail)]) + '\n')
f_train.close()
| kge_model = KGEModel( | 0 | 2023-12-29 22:57:53+00:00 | 12k |
daswer123/rvc-python | rvc_python/modules/vc/modules.py | [
{
"identifier": "load_audio",
"path": "rvc_python/lib/audio.py",
"snippet": "def load_audio(file, sr):\n file = (\n file.strip(\" \").strip('\"').strip(\"\\n\").strip('\"').strip(\" \")\n ) # 防止小白拷路径头尾带了空格和\"和回车\n if os.path.exists(file) == False:\n raise RuntimeError(\n ... | import traceback
import logging
import numpy as np
import soundfile as sf
import torch
from io import BytesIO
from rvc_python.lib.audio import load_audio, wav2
from rvc_python.lib.infer_pack.models import (
SynthesizerTrnMs256NSFsid,
SynthesizerTrnMs256NSFsid_nono,
SynthesizerTrnMs768NSFsid,
SynthesizerTrnMs768NSFsid_nono,
)
from rvc_python.modules.vc.pipeline import Pipeline
from rvc_python.modules.vc.utils import * | 9,413 |
logger = logging.getLogger(__name__)
class VC:
    def __init__(self, lib_dir, config):
        """Hold voice-conversion state; the model itself is loaded later by get_vc().

        Args:
            lib_dir: root directory of bundled assets/weights.
            config: runtime config object (device / half-precision flags, etc.).
        """
        self.lib_dir = lib_dir
        self.n_spk = None          # number of speakers in the loaded model
        self.tgt_sr = None         # target sample rate of the loaded model
        self.net_g = None          # synthesizer network, set by get_vc()
        self.pipeline = None       # inference Pipeline, built after model load
        self.cpt = None            # raw checkpoint dict
        self.version = None        # model version tag ("v1"/"v2")
        self.if_f0 = None          # whether the model uses F0 conditioning
        self.version = None        # NOTE(review): duplicate assignment; redundant
        self.hubert_model = None   # HuBERT feature extractor, loaded on demand
        self.config = config
def get_vc(self,sid,version = "v2", *to_return_protect):
# logger.info("Get sid: " + sid)
to_return_protect0 = {
"visible": self.if_f0 != 0,
"value": to_return_protect[0]
if self.if_f0 != 0 and to_return_protect
else 0.5,
"__type__": "update",
}
to_return_protect1 = {
"visible": self.if_f0 != 0,
"value": to_return_protect[1]
if self.if_f0 != 0 and to_return_protect
else 0.33,
"__type__": "update",
}
if sid == "" or sid == []:
if self.hubert_model is not None: # 考虑到轮询, 需要加个判断看是否 sid 是由有模型切换到无模型的
logger.info("Clean model cache")
del (self.net_g, self.n_spk, self.hubert_model, self.tgt_sr) # ,cpt
self.hubert_model = (
self.net_g
) = self.n_spk = self.hubert_model = self.tgt_sr = None
if torch.cuda.is_available():
torch.cuda.empty_cache()
###楼下不这么折腾清理不干净
self.if_f0 = self.cpt.get("f0", 1)
self.version = self.cpt.get("version", "v1")
if self.version == "v1":
if self.if_f0 == 1:
self.net_g = SynthesizerTrnMs256NSFsid(
*self.cpt["config"], is_half=self.config.is_half
)
else:
self.net_g = SynthesizerTrnMs256NSFsid_nono(*self.cpt["config"])
elif self.version == "v2":
if self.if_f0 == 1:
self.net_g = SynthesizerTrnMs768NSFsid(
*self.cpt["config"], is_half=self.config.is_half
)
else:
|
logger = logging.getLogger(__name__)
class VC:
    def __init__(self, lib_dir, config):
        """Hold voice-conversion state; the model itself is loaded later by get_vc().

        Args:
            lib_dir: root directory of bundled assets/weights.
            config: runtime config object (device / half-precision flags, etc.).
        """
        self.lib_dir = lib_dir
        self.n_spk = None          # number of speakers in the loaded model
        self.tgt_sr = None         # target sample rate of the loaded model
        self.net_g = None          # synthesizer network, set by get_vc()
        self.pipeline = None       # inference Pipeline, built after model load
        self.cpt = None            # raw checkpoint dict
        self.version = None        # model version tag ("v1"/"v2")
        self.if_f0 = None          # whether the model uses F0 conditioning
        self.version = None        # NOTE(review): duplicate assignment; redundant
        self.hubert_model = None   # HuBERT feature extractor, loaded on demand
        self.config = config
def get_vc(self,sid,version = "v2", *to_return_protect):
# logger.info("Get sid: " + sid)
to_return_protect0 = {
"visible": self.if_f0 != 0,
"value": to_return_protect[0]
if self.if_f0 != 0 and to_return_protect
else 0.5,
"__type__": "update",
}
to_return_protect1 = {
"visible": self.if_f0 != 0,
"value": to_return_protect[1]
if self.if_f0 != 0 and to_return_protect
else 0.33,
"__type__": "update",
}
if sid == "" or sid == []:
if self.hubert_model is not None: # 考虑到轮询, 需要加个判断看是否 sid 是由有模型切换到无模型的
logger.info("Clean model cache")
del (self.net_g, self.n_spk, self.hubert_model, self.tgt_sr) # ,cpt
self.hubert_model = (
self.net_g
) = self.n_spk = self.hubert_model = self.tgt_sr = None
if torch.cuda.is_available():
torch.cuda.empty_cache()
###楼下不这么折腾清理不干净
self.if_f0 = self.cpt.get("f0", 1)
self.version = self.cpt.get("version", "v1")
if self.version == "v1":
if self.if_f0 == 1:
self.net_g = SynthesizerTrnMs256NSFsid(
*self.cpt["config"], is_half=self.config.is_half
)
else:
self.net_g = SynthesizerTrnMs256NSFsid_nono(*self.cpt["config"])
elif self.version == "v2":
if self.if_f0 == 1:
self.net_g = SynthesizerTrnMs768NSFsid(
*self.cpt["config"], is_half=self.config.is_half
)
else: | self.net_g = SynthesizerTrnMs768NSFsid_nono(*self.cpt["config"]) | 5 | 2023-12-26 19:05:42+00:00 | 12k |
open-mmlab/Amphion | modules/wenet_extractor/transformer/asr_model.py | [
{
"identifier": "CTC",
"path": "modules/wenet_extractor/transformer/ctc.py",
"snippet": "class CTC(torch.nn.Module):\n \"\"\"CTC module\"\"\"\n\n def __init__(\n self,\n odim: int,\n encoder_output_size: int,\n dropout_rate: float = 0.0,\n reduce: bool = True,\n ... | from collections import defaultdict
from typing import Dict, List, Optional, Tuple
from torch.nn.utils.rnn import pad_sequence
from modules.wenet_extractor.transformer.ctc import CTC
from modules.wenet_extractor.transformer.decoder import TransformerDecoder
from modules.wenet_extractor.transformer.encoder import TransformerEncoder
from modules.wenet_extractor.transformer.label_smoothing_loss import LabelSmoothingLoss
from modules.wenet_extractor.utils.common import (
IGNORE_ID,
add_sos_eos,
log_add,
remove_duplicates_and_blank,
th_accuracy,
reverse_pad_list,
)
from modules.wenet_extractor.utils.mask import (
make_pad_mask,
mask_finished_preds,
mask_finished_scores,
subsequent_mask,
)
import torch
import torch.nn.functional as F | 8,948 | scores = torch.tensor(
[0.0] + [-float("inf")] * (beam_size - 1), dtype=torch.float
)
scores = (
scores.to(device).repeat([batch_size]).unsqueeze(1).to(device)
) # (B*N, 1)
end_flag = torch.zeros_like(scores, dtype=torch.bool, device=device)
cache: Optional[List[torch.Tensor]] = None
# 2. Decoder forward step by step
for i in range(1, maxlen + 1):
# Stop if all batch and all beam produce eos
if end_flag.sum() == running_size:
break
# 2.1 Forward decoder step
hyps_mask = (
subsequent_mask(i).unsqueeze(0).repeat(running_size, 1, 1).to(device)
) # (B*N, i, i)
# logp: (B*N, vocab)
logp, cache = self.decoder.forward_one_step(
encoder_out, encoder_mask, hyps, hyps_mask, cache
)
# 2.2 First beam prune: select topk best prob at current time
top_k_logp, top_k_index = logp.topk(beam_size) # (B*N, N)
top_k_logp = mask_finished_scores(top_k_logp, end_flag)
top_k_index = mask_finished_preds(top_k_index, end_flag, self.eos)
# 2.3 Second beam prune: select topk score with history
scores = scores + top_k_logp # (B*N, N), broadcast add
scores = scores.view(batch_size, beam_size * beam_size) # (B, N*N)
scores, offset_k_index = scores.topk(k=beam_size) # (B, N)
# Update cache to be consistent with new topk scores / hyps
cache_index = (offset_k_index // beam_size).view(-1) # (B*N)
base_cache_index = (
torch.arange(batch_size, device=device)
.view(-1, 1)
.repeat([1, beam_size])
* beam_size
).view(
-1
) # (B*N)
cache_index = base_cache_index + cache_index
cache = [torch.index_select(c, dim=0, index=cache_index) for c in cache]
scores = scores.view(-1, 1) # (B*N, 1)
# 2.4. Compute base index in top_k_index,
# regard top_k_index as (B*N*N),regard offset_k_index as (B*N),
# then find offset_k_index in top_k_index
base_k_index = (
torch.arange(batch_size, device=device)
.view(-1, 1)
.repeat([1, beam_size])
) # (B, N)
base_k_index = base_k_index * beam_size * beam_size
best_k_index = base_k_index.view(-1) + offset_k_index.view(-1) # (B*N)
# 2.5 Update best hyps
best_k_pred = torch.index_select(
top_k_index.view(-1), dim=-1, index=best_k_index
) # (B*N)
best_hyps_index = best_k_index // beam_size
last_best_k_hyps = torch.index_select(
hyps, dim=0, index=best_hyps_index
) # (B*N, i)
hyps = torch.cat(
(last_best_k_hyps, best_k_pred.view(-1, 1)), dim=1
) # (B*N, i+1)
# 2.6 Update end flag
end_flag = torch.eq(hyps[:, -1], self.eos).view(-1, 1)
# 3. Select best of best
scores = scores.view(batch_size, beam_size)
# TODO: length normalization
best_scores, best_index = scores.max(dim=-1)
best_hyps_index = (
best_index
+ torch.arange(batch_size, dtype=torch.long, device=device) * beam_size
)
best_hyps = torch.index_select(hyps, dim=0, index=best_hyps_index)
best_hyps = best_hyps[:, 1:]
return best_hyps, best_scores
def ctc_greedy_search(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
decoding_chunk_size: int = -1,
num_decoding_left_chunks: int = -1,
simulate_streaming: bool = False,
) -> List[List[int]]:
"""Apply CTC greedy search
Args:
speech (torch.Tensor): (batch, max_len, feat_dim)
speech_length (torch.Tensor): (batch, )
beam_size (int): beam size for beam search
decoding_chunk_size (int): decoding chunk for dynamic chunk
trained model.
<0: for decoding, use full chunk.
>0: for decoding, use fixed chunk size as set.
0: used for training, it's prohibited here
simulate_streaming (bool): whether do encoder forward in a
streaming fashion
Returns:
List[List[int]]: best path result
"""
assert speech.shape[0] == speech_lengths.shape[0]
assert decoding_chunk_size != 0
batch_size = speech.shape[0]
# Let's assume B = batch_size
encoder_out, encoder_mask = self._forward_encoder(
speech,
speech_lengths,
decoding_chunk_size,
num_decoding_left_chunks,
simulate_streaming,
) # (B, maxlen, encoder_dim)
maxlen = encoder_out.size(1)
encoder_out_lens = encoder_mask.squeeze(1).sum(1)
ctc_probs = self.ctc.log_softmax(encoder_out) # (B, maxlen, vocab_size)
topk_prob, topk_index = ctc_probs.topk(1, dim=2) # (B, maxlen, 1)
topk_index = topk_index.view(batch_size, maxlen) # (B, maxlen)
| # This module is from [WeNet](https://github.com/wenet-e2e/wenet).
# ## Citations
# ```bibtex
# @inproceedings{yao2021wenet,
# title={WeNet: Production oriented Streaming and Non-streaming End-to-End Speech Recognition Toolkit},
# author={Yao, Zhuoyuan and Wu, Di and Wang, Xiong and Zhang, Binbin and Yu, Fan and Yang, Chao and Peng, Zhendong and Chen, Xiaoyu and Xie, Lei and Lei, Xin},
# booktitle={Proc. Interspeech},
# year={2021},
# address={Brno, Czech Republic },
# organization={IEEE}
# }
# @article{zhang2022wenet,
# title={WeNet 2.0: More Productive End-to-End Speech Recognition Toolkit},
# author={Zhang, Binbin and Wu, Di and Peng, Zhendong and Song, Xingchen and Yao, Zhuoyuan and Lv, Hang and Xie, Lei and Yang, Chao and Pan, Fuping and Niu, Jianwei},
# journal={arXiv preprint arXiv:2203.15455},
# year={2022}
# }
#
class ASRModel(torch.nn.Module):
"""CTC-attention hybrid Encoder-Decoder model"""
def __init__(
self,
vocab_size: int,
encoder: TransformerEncoder,
decoder: TransformerDecoder,
ctc: CTC,
ctc_weight: float = 0.5,
ignore_id: int = IGNORE_ID,
reverse_weight: float = 0.0,
lsm_weight: float = 0.0,
length_normalized_loss: bool = False,
lfmmi_dir: str = "",
):
assert 0.0 <= ctc_weight <= 1.0, ctc_weight
super().__init__()
# note that eos is the same as sos (equivalent ID)
self.sos = vocab_size - 1
self.eos = vocab_size - 1
self.vocab_size = vocab_size
self.ignore_id = ignore_id
self.ctc_weight = ctc_weight
self.reverse_weight = reverse_weight
self.encoder = encoder
self.decoder = decoder
self.ctc = ctc
self.criterion_att = LabelSmoothingLoss(
size=vocab_size,
padding_idx=ignore_id,
smoothing=lsm_weight,
normalize_length=length_normalized_loss,
)
self.lfmmi_dir = lfmmi_dir
if self.lfmmi_dir != "":
self.load_lfmmi_resource()
def forward(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
text: torch.Tensor,
text_lengths: torch.Tensor,
) -> Dict[str, Optional[torch.Tensor]]:
"""Frontend + Encoder + Decoder + Calc loss
Args:
speech: (Batch, Length, ...)
speech_lengths: (Batch, )
text: (Batch, Length)
text_lengths: (Batch,)
"""
assert text_lengths.dim() == 1, text_lengths.shape
# Check that batch_size is unified
assert (
speech.shape[0]
== speech_lengths.shape[0]
== text.shape[0]
== text_lengths.shape[0]
), (speech.shape, speech_lengths.shape, text.shape, text_lengths.shape)
# 1. Encoder
encoder_out, encoder_mask = self.encoder(speech, speech_lengths)
encoder_out_lens = encoder_mask.squeeze(1).sum(1)
# 2a. Attention-decoder branch
if self.ctc_weight != 1.0:
loss_att, acc_att = self._calc_att_loss(
encoder_out, encoder_mask, text, text_lengths
)
else:
loss_att = None
# 2b. CTC branch or LF-MMI loss
if self.ctc_weight != 0.0:
if self.lfmmi_dir != "":
loss_ctc = self._calc_lfmmi_loss(encoder_out, encoder_mask, text)
else:
loss_ctc = self.ctc(encoder_out, encoder_out_lens, text, text_lengths)
else:
loss_ctc = None
if loss_ctc is None:
loss = loss_att
elif loss_att is None:
loss = loss_ctc
else:
loss = self.ctc_weight * loss_ctc + (1 - self.ctc_weight) * loss_att
return {"loss": loss, "loss_att": loss_att, "loss_ctc": loss_ctc}
def _calc_att_loss(
self,
encoder_out: torch.Tensor,
encoder_mask: torch.Tensor,
ys_pad: torch.Tensor,
ys_pad_lens: torch.Tensor,
) -> Tuple[torch.Tensor, float]:
ys_in_pad, ys_out_pad = add_sos_eos(ys_pad, self.sos, self.eos, self.ignore_id)
ys_in_lens = ys_pad_lens + 1
# reverse the seq, used for right to left decoder
r_ys_pad = reverse_pad_list(ys_pad, ys_pad_lens, float(self.ignore_id))
r_ys_in_pad, r_ys_out_pad = add_sos_eos(
r_ys_pad, self.sos, self.eos, self.ignore_id
)
# 1. Forward decoder
decoder_out, r_decoder_out, _ = self.decoder(
encoder_out,
encoder_mask,
ys_in_pad,
ys_in_lens,
r_ys_in_pad,
self.reverse_weight,
)
# 2. Compute attention loss
loss_att = self.criterion_att(decoder_out, ys_out_pad)
r_loss_att = torch.tensor(0.0)
if self.reverse_weight > 0.0:
r_loss_att = self.criterion_att(r_decoder_out, r_ys_out_pad)
loss_att = (
loss_att * (1 - self.reverse_weight) + r_loss_att * self.reverse_weight
)
acc_att = th_accuracy(
decoder_out.view(-1, self.vocab_size),
ys_out_pad,
ignore_label=self.ignore_id,
)
return loss_att, acc_att
def _forward_encoder(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
decoding_chunk_size: int = -1,
num_decoding_left_chunks: int = -1,
simulate_streaming: bool = False,
) -> Tuple[torch.Tensor, torch.Tensor]:
# Let's assume B = batch_size
# 1. Encoder
if simulate_streaming and decoding_chunk_size > 0:
encoder_out, encoder_mask = self.encoder.forward_chunk_by_chunk(
speech,
decoding_chunk_size=decoding_chunk_size,
num_decoding_left_chunks=num_decoding_left_chunks,
) # (B, maxlen, encoder_dim)
else:
encoder_out, encoder_mask = self.encoder(
speech,
speech_lengths,
decoding_chunk_size=decoding_chunk_size,
num_decoding_left_chunks=num_decoding_left_chunks,
) # (B, maxlen, encoder_dim)
return encoder_out, encoder_mask
def encoder_extractor(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
decoding_chunk_size: int = -1,
num_decoding_left_chunks: int = -1,
simulate_streaming: bool = False,
) -> Tuple[torch.Tensor, torch.Tensor]:
# assert speech.shape[0] == speech_lengths[0]
assert decoding_chunk_size != 0
batch_size = speech.shape[0]
encoder_out, encoder_mask = self._forward_encoder(
speech,
speech_lengths,
decoding_chunk_size,
num_decoding_left_chunks,
simulate_streaming,
) # (B, maxlen, encoder_dim)
return encoder_out
def recognize(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
beam_size: int = 10,
decoding_chunk_size: int = -1,
num_decoding_left_chunks: int = -1,
simulate_streaming: bool = False,
) -> torch.Tensor:
"""Apply beam search on attention decoder
Args:
speech (torch.Tensor): (batch, max_len, feat_dim)
speech_length (torch.Tensor): (batch, )
beam_size (int): beam size for beam search
decoding_chunk_size (int): decoding chunk for dynamic chunk
trained model.
<0: for decoding, use full chunk.
>0: for decoding, use fixed chunk size as set.
0: used for training, it's prohibited here
simulate_streaming (bool): whether do encoder forward in a
streaming fashion
Returns:
torch.Tensor: decoding result, (batch, max_result_len)
"""
assert speech.shape[0] == speech_lengths.shape[0]
assert decoding_chunk_size != 0
device = speech.device
batch_size = speech.shape[0]
# Let's assume B = batch_size and N = beam_size
# 1. Encoder
encoder_out, encoder_mask = self._forward_encoder(
speech,
speech_lengths,
decoding_chunk_size,
num_decoding_left_chunks,
simulate_streaming,
) # (B, maxlen, encoder_dim)
maxlen = encoder_out.size(1)
encoder_dim = encoder_out.size(2)
running_size = batch_size * beam_size
encoder_out = (
encoder_out.unsqueeze(1)
.repeat(1, beam_size, 1, 1)
.view(running_size, maxlen, encoder_dim)
) # (B*N, maxlen, encoder_dim)
encoder_mask = (
encoder_mask.unsqueeze(1)
.repeat(1, beam_size, 1, 1)
.view(running_size, 1, maxlen)
) # (B*N, 1, max_len)
hyps = torch.ones([running_size, 1], dtype=torch.long, device=device).fill_(
self.sos
) # (B*N, 1)
scores = torch.tensor(
[0.0] + [-float("inf")] * (beam_size - 1), dtype=torch.float
)
scores = (
scores.to(device).repeat([batch_size]).unsqueeze(1).to(device)
) # (B*N, 1)
end_flag = torch.zeros_like(scores, dtype=torch.bool, device=device)
cache: Optional[List[torch.Tensor]] = None
# 2. Decoder forward step by step
for i in range(1, maxlen + 1):
# Stop if all batch and all beam produce eos
if end_flag.sum() == running_size:
break
# 2.1 Forward decoder step
hyps_mask = (
subsequent_mask(i).unsqueeze(0).repeat(running_size, 1, 1).to(device)
) # (B*N, i, i)
# logp: (B*N, vocab)
logp, cache = self.decoder.forward_one_step(
encoder_out, encoder_mask, hyps, hyps_mask, cache
)
# 2.2 First beam prune: select topk best prob at current time
top_k_logp, top_k_index = logp.topk(beam_size) # (B*N, N)
top_k_logp = mask_finished_scores(top_k_logp, end_flag)
top_k_index = mask_finished_preds(top_k_index, end_flag, self.eos)
# 2.3 Second beam prune: select topk score with history
scores = scores + top_k_logp # (B*N, N), broadcast add
scores = scores.view(batch_size, beam_size * beam_size) # (B, N*N)
scores, offset_k_index = scores.topk(k=beam_size) # (B, N)
# Update cache to be consistent with new topk scores / hyps
cache_index = (offset_k_index // beam_size).view(-1) # (B*N)
base_cache_index = (
torch.arange(batch_size, device=device)
.view(-1, 1)
.repeat([1, beam_size])
* beam_size
).view(
-1
) # (B*N)
cache_index = base_cache_index + cache_index
cache = [torch.index_select(c, dim=0, index=cache_index) for c in cache]
scores = scores.view(-1, 1) # (B*N, 1)
# 2.4. Compute base index in top_k_index,
# regard top_k_index as (B*N*N),regard offset_k_index as (B*N),
# then find offset_k_index in top_k_index
base_k_index = (
torch.arange(batch_size, device=device)
.view(-1, 1)
.repeat([1, beam_size])
) # (B, N)
base_k_index = base_k_index * beam_size * beam_size
best_k_index = base_k_index.view(-1) + offset_k_index.view(-1) # (B*N)
# 2.5 Update best hyps
best_k_pred = torch.index_select(
top_k_index.view(-1), dim=-1, index=best_k_index
) # (B*N)
best_hyps_index = best_k_index // beam_size
last_best_k_hyps = torch.index_select(
hyps, dim=0, index=best_hyps_index
) # (B*N, i)
hyps = torch.cat(
(last_best_k_hyps, best_k_pred.view(-1, 1)), dim=1
) # (B*N, i+1)
# 2.6 Update end flag
end_flag = torch.eq(hyps[:, -1], self.eos).view(-1, 1)
# 3. Select best of best
scores = scores.view(batch_size, beam_size)
# TODO: length normalization
best_scores, best_index = scores.max(dim=-1)
best_hyps_index = (
best_index
+ torch.arange(batch_size, dtype=torch.long, device=device) * beam_size
)
best_hyps = torch.index_select(hyps, dim=0, index=best_hyps_index)
best_hyps = best_hyps[:, 1:]
return best_hyps, best_scores
def ctc_greedy_search(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
decoding_chunk_size: int = -1,
num_decoding_left_chunks: int = -1,
simulate_streaming: bool = False,
) -> List[List[int]]:
"""Apply CTC greedy search
Args:
speech (torch.Tensor): (batch, max_len, feat_dim)
speech_length (torch.Tensor): (batch, )
beam_size (int): beam size for beam search
decoding_chunk_size (int): decoding chunk for dynamic chunk
trained model.
<0: for decoding, use full chunk.
>0: for decoding, use fixed chunk size as set.
0: used for training, it's prohibited here
simulate_streaming (bool): whether do encoder forward in a
streaming fashion
Returns:
List[List[int]]: best path result
"""
assert speech.shape[0] == speech_lengths.shape[0]
assert decoding_chunk_size != 0
batch_size = speech.shape[0]
# Let's assume B = batch_size
encoder_out, encoder_mask = self._forward_encoder(
speech,
speech_lengths,
decoding_chunk_size,
num_decoding_left_chunks,
simulate_streaming,
) # (B, maxlen, encoder_dim)
maxlen = encoder_out.size(1)
encoder_out_lens = encoder_mask.squeeze(1).sum(1)
ctc_probs = self.ctc.log_softmax(encoder_out) # (B, maxlen, vocab_size)
topk_prob, topk_index = ctc_probs.topk(1, dim=2) # (B, maxlen, 1)
topk_index = topk_index.view(batch_size, maxlen) # (B, maxlen) | mask = make_pad_mask(encoder_out_lens, maxlen) # (B, maxlen) | 10 | 2023-11-15 09:19:27+00:00 | 12k |
banodoco/Steerable-Motion | imports/AdvancedControlNet/nodes.py | [
{
"identifier": "load_controlnet",
"path": "imports/AdvancedControlNet/control.py",
"snippet": "def load_controlnet(ckpt_path, timestep_keyframe: TimestepKeyframeGroupImport=None, model=None):\n control = comfy_cn.load_controlnet(ckpt_path, model=model)\n # TODO: support controlnet-lllite\n # i... | import numpy as np
import folder_paths
from torch import Tensor
from .control import load_controlnet, convert_to_advanced, ControlWeightsImport, ControlWeightTypeImport,\
LatentKeyframeGroupImport, TimestepKeyframeImport, TimestepKeyframeGroupImport, is_advanced_controlnet
from .control import StrengthInterpolationImport as SI
from .weight_nodes import DefaultWeightsImport, ScaledSoftMaskedUniversalWeightsImport, ScaledSoftUniversalWeightsImport, SoftControlNetWeightsImport, CustomControlNetWeightsImport, \
SoftT2IAdapterWeightsImport, CustomT2IAdapterWeightsImport
from .latent_keyframe_nodes import LatentKeyframeGroupNodeImport, LatentKeyframeInterpolationNodeImport, LatentKeyframeBatchedGroupNodeImport, LatentKeyframeNodeImport
from .logger import logger | 9,431 |
class TimestepKeyframeNodeImport:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}, ),
},
"optional": {
"prev_timestep_kf": ("TIMESTEP_KEYFRAME", ),
"strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ),
"cn_weights": ("CONTROL_NET_WEIGHTS", ),
"latent_keyframe": ("LATENT_KEYFRAME", ),
"null_latent_kf_strength": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 10.0, "step": 0.001}, ),
"inherit_missing": ("BOOLEAN", {"default": True}, ),
"guarantee_usage": ("BOOLEAN", {"default": True}, ),
"mask_optional": ("MASK", ),
#"interpolation": ([SI.LINEAR, SI.EASE_IN, SI.EASE_OUT, SI.EASE_IN_OUT, SI.NONE], {"default": SI.NONE}, ),
}
}
RETURN_NAMES = ("TIMESTEP_KF", )
RETURN_TYPES = ("TIMESTEP_KEYFRAME", )
FUNCTION = "load_keyframe"
CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝/keyframes"
def load_keyframe(self,
start_percent: float,
strength: float=1.0,
cn_weights: ControlWeightsImport=None, control_net_weights: ControlWeightsImport=None, # old name
latent_keyframe: LatentKeyframeGroupImport=None,
prev_timestep_kf: TimestepKeyframeGroupImport=None, prev_timestep_keyframe: TimestepKeyframeGroupImport=None, # old name
null_latent_kf_strength: float=0.0,
inherit_missing=True,
guarantee_usage=True,
mask_optional=None,
interpolation: str=SI.NONE,):
control_net_weights = control_net_weights if control_net_weights else cn_weights
prev_timestep_keyframe = prev_timestep_keyframe if prev_timestep_keyframe else prev_timestep_kf
if not prev_timestep_keyframe:
prev_timestep_keyframe = TimestepKeyframeGroupImport()
else:
prev_timestep_keyframe = prev_timestep_keyframe.clone()
|
class TimestepKeyframeNodeImport:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}, ),
},
"optional": {
"prev_timestep_kf": ("TIMESTEP_KEYFRAME", ),
"strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}, ),
"cn_weights": ("CONTROL_NET_WEIGHTS", ),
"latent_keyframe": ("LATENT_KEYFRAME", ),
"null_latent_kf_strength": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 10.0, "step": 0.001}, ),
"inherit_missing": ("BOOLEAN", {"default": True}, ),
"guarantee_usage": ("BOOLEAN", {"default": True}, ),
"mask_optional": ("MASK", ),
#"interpolation": ([SI.LINEAR, SI.EASE_IN, SI.EASE_OUT, SI.EASE_IN_OUT, SI.NONE], {"default": SI.NONE}, ),
}
}
RETURN_NAMES = ("TIMESTEP_KF", )
RETURN_TYPES = ("TIMESTEP_KEYFRAME", )
FUNCTION = "load_keyframe"
CATEGORY = "Adv-ControlNet 🛂🅐🅒🅝/keyframes"
def load_keyframe(self,
start_percent: float,
strength: float=1.0,
cn_weights: ControlWeightsImport=None, control_net_weights: ControlWeightsImport=None, # old name
latent_keyframe: LatentKeyframeGroupImport=None,
prev_timestep_kf: TimestepKeyframeGroupImport=None, prev_timestep_keyframe: TimestepKeyframeGroupImport=None, # old name
null_latent_kf_strength: float=0.0,
inherit_missing=True,
guarantee_usage=True,
mask_optional=None,
interpolation: str=SI.NONE,):
control_net_weights = control_net_weights if control_net_weights else cn_weights
prev_timestep_keyframe = prev_timestep_keyframe if prev_timestep_keyframe else prev_timestep_kf
if not prev_timestep_keyframe:
prev_timestep_keyframe = TimestepKeyframeGroupImport()
else:
prev_timestep_keyframe = prev_timestep_keyframe.clone() | keyframe = TimestepKeyframeImport(start_percent=start_percent, strength=strength, interpolation=interpolation, null_latent_kf_strength=null_latent_kf_strength, | 5 | 2023-11-11 01:26:26+00:00 | 12k |
Zaloog/kanban-python | src/kanban_python/controls.py | [
{
"identifier": "cfg",
"path": "src/kanban_python/config.py",
"snippet": "class KanbanConfig:\n def __init__(self, path=CONFIG_FILE_PATH) -> None:\n def __repr__(self) -> str:\n def save(self):\n def config(self) -> configparser.ConfigParser:\n def active_board(self) -> str:\n def acti... | from json import dump, load
from rich.pretty import pprint
from .config import (
cfg,
check_if_board_name_exists_in_config,
check_if_current_active_board_in_board_list,
delete_board_from_config,
get_json_path,
)
from .constants import (
DUMMY_DB,
KANBAN_BOARDS_PATH,
REPORT_FILE_NAME,
REPORT_FILE_PATH,
TASK_FILE_NAME,
)
from .interface import (
create_config_table,
create_github_like_report_table,
create_table,
input_ask_for_action,
input_ask_for_action_settings,
input_ask_for_change_board,
input_ask_for_delete_board,
input_ask_for_new_board_name,
input_ask_which_task_to_update,
input_ask_which_tasks_to_show,
input_change_column_settings,
input_change_done_limit_settings,
input_change_files_to_scan_settings,
input_change_footer_settings,
input_change_min_col_width_settings,
input_change_patterns_to_scan_settings,
input_confirm_add_todos_to_board,
input_confirm_delete_board,
input_confirm_set_board_active,
input_create_new_task,
input_update_task,
)
from .utils import (
check_board_name_valid,
check_if_done_col_leq_X,
check_if_there_are_visible_tasks_in_board,
check_scanner_files_valid,
check_scanner_patterns_valid,
console,
create_report_document,
current_time_to_str,
delete_json_file,
get_tag_id_choices,
move_first_done_task_to_archive,
scan_files,
scan_for_todos,
split_todo_in_tag_and_title,
) | 7,223 | # Action 5
def delete_kanban_board():
board_to_delete = input_ask_for_delete_board()
if input_confirm_delete_board(board_to_delete):
board_to_delete_path = cfg.kanban_boards_dict[board_to_delete]
delete_json_file(board_to_delete_path)
delete_board_from_config(board_to_delete)
def show():
if not cfg.kanban_boards:
console.print(":warning: [red]No Boards created yet[/]:warning:")
console.print("Use 'kanban init' to create a new kanban board.")
raise KeyboardInterrupt
if not check_if_current_active_board_in_board_list():
console.print(
"[yellow]Hmm, Something went wrong.[/] "
+ f"The active board '{cfg.active_board}' is not in the list of boards."
)
change_kanban_board()
show()
return
db_data = read_db()
table = create_table(data=db_data)
console.print(table)
# Scan Functionality
#####################################################################################
def add_todos_to_board():
files = scan_files(endings=cfg.scanned_files)
todos = scan_for_todos(file_paths=files, patterns=cfg.scanned_patterns)
if not todos:
console.print(
":cross_mark: [red]Nothing found that "
+ "matches any of your provided patterns.[/]"
)
return
# TODO Write Docs for kanban scan functionality
# BUG This pattern also works
if input_confirm_add_todos_to_board(todos=todos):
todo_task_list = []
for task, file in todos:
tag, title = split_todo_in_tag_and_title(task, cfg.scanned_patterns)
new_task = {
"Title": title,
"Description": f"from {file}",
"Status": "Ready",
"Tag": tag,
"Creation_Date": current_time_to_str(),
"Begin_Time": "",
"Complete_Time": "",
"Duration": 0,
}
todo_task_list.append(new_task)
add_tasks_to_db(tasks=todo_task_list)
# Config Settings
#####################################################################################
def change_settings():
while True:
show_settings()
settings_selection = input_ask_for_action_settings()
if settings_selection == 1:
change_kanban_board()
new_min_col_widths = input_change_min_col_width_settings()
cfg.col_min_width = new_min_col_widths
done_limit = input_change_done_limit_settings()
cfg.done_limit = done_limit
footer_visible = input_change_footer_settings()
cfg.show_footer = "True" if footer_visible else "False"
if settings_selection == 2:
updated_col_config = input_change_column_settings()
cfg.kanban_columns_dict = updated_col_config
if settings_selection == 3:
while True:
new_files_to_scan = input_change_files_to_scan_settings()
if check_scanner_files_valid(new_files_to_scan):
cfg.scanned_files = new_files_to_scan
break
console.print(
f":warning: '{new_files_to_scan}' is [red]not[/] a valid."
)
while True:
new_patterns_to_scan = input_change_patterns_to_scan_settings()
if check_scanner_patterns_valid(new_patterns_to_scan):
cfg.scanned_patterns = new_patterns_to_scan
break
console.print(
f":warning: '{new_patterns_to_scan}' is [red]not[/] a valid."
)
if settings_selection == 4:
break
def show_settings():
settings_table = create_config_table()
console.print(settings_table)
# Report Creation
#####################################################################################
def create_report():
boards_dict = read_db("all")
gh_table = create_github_like_report_table(boards_dict)
console.print(gh_table)
if not REPORT_FILE_PATH.exists():
REPORT_FILE_PATH.mkdir(exist_ok=True)
| from __future__ import annotations
# DB Controls
#####################################################################################
def create_new_db() -> None:
while True:
while True:
new_board_name = input_ask_for_new_board_name()
if check_board_name_valid(new_board_name):
break
console.print(f":warning: '{new_board_name}' is [red]not[/] a valid Name.")
if not check_if_board_name_exists_in_config(new_board_name):
break
console.print(
f":warning: Board '{new_board_name}' already exists, choose another Name."
)
cfg.kanban_boards_dict = new_board_name
# Options:
# 1. ~/.kanban-python/<BOARDNAME>.json
# 2. ~/.kanban-python/kanban_boards/<BOARDNAME>.json
# 3. ~/.kanban-python/kanban_boards/<BOARDNAME>/pykanban.json <- THIS
# 4. ~/.kanban-python/kanban_boards/<BOARDNAME>/<BOARDNAME>.json
new_db_path = KANBAN_BOARDS_PATH / new_board_name
if not new_db_path.exists():
new_db_path.mkdir()
with open(get_json_path(new_board_name), "w", encoding="utf-8") as f:
dump(DUMMY_DB, f, ensure_ascii=False, indent=4)
console.print(
f"Created new [orange3]{TASK_FILE_NAME}[/] file at "
+ f"[orange3]{KANBAN_BOARDS_PATH / new_board_name}[/] to save tasks."
)
if input_confirm_set_board_active(name=new_board_name):
cfg.active_board = new_board_name
def save_db(data):
path = cfg.active_board_path
with open(path, "w", encoding="utf-8") as f:
dump(data, f, ensure_ascii=False, indent=4)
def add_tasks_to_db(tasks: dict | list[dict]) -> None:
db_data = read_db()
if isinstance(tasks, dict):
new_id = str(max(int(i) for i in db_data.keys()) + 1)
db_data[new_id] = tasks
else:
for task in tasks:
new_id = str(max(int(i) for i in db_data.keys()) + 1)
db_data[new_id] = task
save_db(data=db_data)
def read_db(path: str = None) -> dict:
if not path:
path = cfg.active_board_path
if path == "all":
board_dict = {
b: read_single_board(b_path) for b, b_path in cfg.kanban_boards_dict.items()
}
return board_dict
try:
data = read_single_board(path)
return data
except FileNotFoundError:
print(path)
console.print(f":warning: No [orange3]{TASK_FILE_NAME}[/] file here anymore.")
console.print("Please change to another board.")
change_kanban_board()
console.print(f"[red]Seems like the previous {TASK_FILE_NAME} file was deleted[/]")
console.print(f"Create new [orange3]{TASK_FILE_NAME}[/] file here.")
create_new_db()
return read_db()
def read_single_board(path):
with open(path, "r") as file:
data = load(file)
return data
# User Action Controls
#####################################################################################
# Get User Action
def get_user_action():
return input_ask_for_action()
# Action 1
def add_new_task_to_db():
new_task = input_create_new_task()
add_tasks_to_db(tasks=new_task)
# Action 2
def update_task_from_db():
db_data = read_db()
if not check_if_there_are_visible_tasks_in_board(db_data, cfg.vis_cols):
console.print(":cross_mark:[red]No Tasks available on this Kanban board[/]")
return
selected_id = input_ask_which_task_to_update(db_data)
updated_task = input_update_task(current_task=db_data[selected_id])
db_data[selected_id] = updated_task
while not check_if_done_col_leq_X(cfg=cfg, data=db_data):
first_task_id, archive_task = move_first_done_task_to_archive(data=db_data)
db_data[first_task_id] = archive_task
save_db(data=db_data)
# Action 3
def change_kanban_board():
boards_dict = read_db(path="all")
new_active_board = input_ask_for_change_board(boards_dict)
cfg.active_board = new_active_board
# Action 4
def show_tasks():
db_data = read_db()
choices = get_tag_id_choices(db_data, cfg.vis_cols)
selection_criteria = input_ask_which_tasks_to_show(choices)
for i, task in db_data.items():
if selection_criteria in [i, task["Tag"]]:
console.print(
20 * "[bold blue]#[/]" + f" Task {i} " + 20 * "[bold blue]#[/]"
)
pprint(
{
key: val
for key, val in task.items()
if key in ["Title", "Description", "Tag", "Status", "Due_Date"]
},
console=console,
expand_all=True,
)
# Action 5
def delete_kanban_board():
board_to_delete = input_ask_for_delete_board()
if input_confirm_delete_board(board_to_delete):
board_to_delete_path = cfg.kanban_boards_dict[board_to_delete]
delete_json_file(board_to_delete_path)
delete_board_from_config(board_to_delete)
def show():
if not cfg.kanban_boards:
console.print(":warning: [red]No Boards created yet[/]:warning:")
console.print("Use 'kanban init' to create a new kanban board.")
raise KeyboardInterrupt
if not check_if_current_active_board_in_board_list():
console.print(
"[yellow]Hmm, Something went wrong.[/] "
+ f"The active board '{cfg.active_board}' is not in the list of boards."
)
change_kanban_board()
show()
return
db_data = read_db()
table = create_table(data=db_data)
console.print(table)
# Scan Functionality
#####################################################################################
def add_todos_to_board():
files = scan_files(endings=cfg.scanned_files)
todos = scan_for_todos(file_paths=files, patterns=cfg.scanned_patterns)
if not todos:
console.print(
":cross_mark: [red]Nothing found that "
+ "matches any of your provided patterns.[/]"
)
return
# TODO Write Docs for kanban scan functionality
# BUG This pattern also works
if input_confirm_add_todos_to_board(todos=todos):
todo_task_list = []
for task, file in todos:
tag, title = split_todo_in_tag_and_title(task, cfg.scanned_patterns)
new_task = {
"Title": title,
"Description": f"from {file}",
"Status": "Ready",
"Tag": tag,
"Creation_Date": current_time_to_str(),
"Begin_Time": "",
"Complete_Time": "",
"Duration": 0,
}
todo_task_list.append(new_task)
add_tasks_to_db(tasks=todo_task_list)
# Config Settings
#####################################################################################
def change_settings():
while True:
show_settings()
settings_selection = input_ask_for_action_settings()
if settings_selection == 1:
change_kanban_board()
new_min_col_widths = input_change_min_col_width_settings()
cfg.col_min_width = new_min_col_widths
done_limit = input_change_done_limit_settings()
cfg.done_limit = done_limit
footer_visible = input_change_footer_settings()
cfg.show_footer = "True" if footer_visible else "False"
if settings_selection == 2:
updated_col_config = input_change_column_settings()
cfg.kanban_columns_dict = updated_col_config
if settings_selection == 3:
while True:
new_files_to_scan = input_change_files_to_scan_settings()
if check_scanner_files_valid(new_files_to_scan):
cfg.scanned_files = new_files_to_scan
break
console.print(
f":warning: '{new_files_to_scan}' is [red]not[/] a valid."
)
while True:
new_patterns_to_scan = input_change_patterns_to_scan_settings()
if check_scanner_patterns_valid(new_patterns_to_scan):
cfg.scanned_patterns = new_patterns_to_scan
break
console.print(
f":warning: '{new_patterns_to_scan}' is [red]not[/] a valid."
)
if settings_selection == 4:
break
def show_settings():
settings_table = create_config_table()
console.print(settings_table)
# Report Creation
#####################################################################################
def create_report():
boards_dict = read_db("all")
gh_table = create_github_like_report_table(boards_dict)
console.print(gh_table)
if not REPORT_FILE_PATH.exists():
REPORT_FILE_PATH.mkdir(exist_ok=True) | create_report_document(boards_dict=boards_dict) | 27 | 2023-11-11 14:43:55+00:00 | 12k |
AMAAI-Lab/mustango | diffusers/src/diffusers/schedulers/scheduling_dpmsolver_multistep.py | [
{
"identifier": "ConfigMixin",
"path": "diffusers/src/diffusers/configuration_utils.py",
"snippet": "class ConfigMixin:\n r\"\"\"\n Base class for all configuration classes. Stores all configuration parameters under `self.config` Also handles all\n methods for loading/downloading/saving classes... | import math
import numpy as np
import torch
from typing import List, Optional, Tuple, Union
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput | 7,637 | # Copyright 2023 TSAIL Team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DISCLAIMER: This file is strongly influenced by https://github.com/LuChengTHU/dpm-solver
# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999):
"""
Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
(1-beta) over time from t = [0,1].
Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
to that part of the diffusion process.
Args:
num_diffusion_timesteps (`int`): the number of betas to produce.
max_beta (`float`): the maximum beta to use; use values lower than 1 to
prevent singularities.
Returns:
betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
"""
def alpha_bar(time_step):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2
betas = []
for i in range(num_diffusion_timesteps):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
return torch.tensor(betas, dtype=torch.float32)
| # Copyright 2023 TSAIL Team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DISCLAIMER: This file is strongly influenced by https://github.com/LuChengTHU/dpm-solver
# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999):
"""
Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
(1-beta) over time from t = [0,1].
Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
to that part of the diffusion process.
Args:
num_diffusion_timesteps (`int`): the number of betas to produce.
max_beta (`float`): the maximum beta to use; use values lower than 1 to
prevent singularities.
Returns:
betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
"""
def alpha_bar(time_step):
return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2
betas = []
for i in range(num_diffusion_timesteps):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
return torch.tensor(betas, dtype=torch.float32)
| class DPMSolverMultistepScheduler(SchedulerMixin, ConfigMixin): | 0 | 2023-11-14 23:29:31+00:00 | 12k |
BraveGroup/Drive-WM | tests/pipelines/stable_diffusion_2/test_stable_diffusion_attend_and_excite.py | [
{
"identifier": "TEXT_TO_IMAGE_BATCH_PARAMS",
"path": "tests/pipelines/pipeline_params.py",
"snippet": "TEXT_TO_IMAGE_BATCH_PARAMS = frozenset([\"prompt\", \"negative_prompt\"])"
},
{
"identifier": "TEXT_TO_IMAGE_IMAGE_PARAMS",
"path": "tests/pipelines/pipeline_params.py",
"snippet": "TE... | import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNet2DConditionModel,
)
from diffusers.utils.testing_utils import (
load_numpy,
nightly,
numpy_cosine_similarity_distance,
require_torch_gpu,
skip_mps,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin | 9,835 | # coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
| # coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests( | PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase | 4 | 2023-11-18 01:40:55+00:00 | 12k |
basnijholt/unidep | unidep/_cli.py | [
{
"identifier": "create_conda_env_specification",
"path": "unidep/_conda_env.py",
"snippet": "def create_conda_env_specification( # noqa: PLR0912\n resolved: dict[str, dict[Platform | None, dict[CondaPip, Spec]]],\n channels: list[str],\n platforms: list[Platform],\n selector: Literal[\"sel... | import argparse
import importlib.util
import os
import shutil
import subprocess
import sys
from pathlib import Path
from unidep._conda_env import (
create_conda_env_specification,
write_conda_environment_file,
)
from unidep._conda_lock import conda_lock_command
from unidep._conflicts import resolve_conflicts
from unidep._dependencies_parsing import (
find_requirements_files,
parse_local_dependencies,
parse_requirements,
)
from unidep._setuptools_integration import (
filter_python_dependencies,
get_python_dependencies,
)
from unidep._version import __version__
from unidep.platform_definitions import Platform
from unidep.utils import (
add_comment_to_file,
dependencies_filename,
escape_unicode,
identify_current_platform,
is_pip_installable,
parse_package_str,
warn,
)
from typing import Literal, get_args
from typing_extensions import Literal, get_args
from rich_argparse import RichHelpFormatter
from argparse import HelpFormatter as _HelpFormatter # type: ignore[assignment] | 8,069 | parser_lock,
{
"directory",
"verbose",
"platform",
"depth",
"ignore-pin",
"skip-dependency",
"overwrite-pin",
},
)
# Subparser for the 'pip-compile' command
pip_compile_help = (
"Generate a fully pinned `requirements.txt` file from one or more"
f" {_DEP_FILES}"
" files using `pip-compile` from `pip-tools`. This"
f" command consolidates all pip dependencies defined in the {_DEP_FILES}"
" files and compiles them into a single `requirements.txt` file, taking"
" into account the specific versions and dependencies of each package."
)
pip_compile_example = (
" Example usage: `unidep pip-compile --directory ./projects` to generate"
f" a `requirements.txt` file for all {_DEP_FILES}"
" files in the"
" `./projects` directory. Use `--output-file requirements.txt` to specify a"
" different output file."
)
parser_pip_compile = subparsers.add_parser(
"pip-compile",
help=pip_compile_help,
description=pip_compile_help + pip_compile_example,
formatter_class=_HelpFormatter,
)
parser_pip_compile.add_argument(
"-o",
"--output-file",
type=Path,
default=None,
help="Output file for the pip requirements, by default `requirements.txt`",
)
_add_common_args(
parser_pip_compile,
{
"directory",
"verbose",
"platform",
"depth",
"ignore-pin",
"skip-dependency",
"overwrite-pin",
},
)
parser_pip_compile.add_argument(
"extra_flags",
nargs=argparse.REMAINDER,
help="Extra flags to pass to `pip-compile`. These flags are passed directly"
" and should be provided in the format expected by `pip-compile`. For example,"
" `unidep pip-compile -- --generate-hashes --allow-unsafe`. Note that the"
" `--` is required to separate the flags for `unidep` from the flags for"
" `pip-compile`.",
)
# Subparser for the 'pip' and 'conda' command
help_str = "Get the {} requirements for the current platform only."
help_example = (
" Example usage: `unidep {which} --file folder1 --file"
" folder2/requirements.yaml --seperator ' ' --platform linux-64` to"
" extract all the {which} dependencies specific to the linux-64 platform. Note"
" that the `--file` argument can be used multiple times to specify multiple"
f" {_DEP_FILES}"
" files and that --file can also be a folder that contains"
f" a {_DEP_FILES} file."
)
parser_pip = subparsers.add_parser(
"pip",
help=help_str.format("pip"),
description=help_str.format("pip") + help_example.format(which="pip"),
formatter_class=_HelpFormatter,
)
parser_conda = subparsers.add_parser(
"conda",
help=help_str.format("conda"),
description=help_str.format("conda") + help_example.format(which="conda"),
formatter_class=_HelpFormatter,
)
for sub_parser in [parser_pip, parser_conda]:
_add_common_args(
sub_parser,
{
"verbose",
"platform",
"file",
"ignore-pin",
"skip-dependency",
"overwrite-pin",
},
)
sub_parser.add_argument(
"--separator",
type=str,
default=" ",
help="The separator between the dependencies, by default ` `",
)
# Subparser for the 'version' command
parser_merge = subparsers.add_parser(
"version",
help="Print version information of unidep.",
formatter_class=_HelpFormatter,
)
args = parser.parse_args()
if args.command is None: # pragma: no cover
parser.print_help()
sys.exit(1)
if "file" in args and args.file.is_dir(): # pragma: no cover
| #!/usr/bin/env python3
"""unidep - Unified Conda and Pip requirements management.
This module provides a command-line tool for managing conda environment.yaml files.
"""
from __future__ import annotations
if sys.version_info >= (3, 8):
else: # pragma: no cover
try: # pragma: no cover
class _HelpFormatter(RichHelpFormatter):
def _get_help_string(self, action: argparse.Action) -> str | None:
# escapes "[" in text, otherwise e.g., [linux] is removed
if action.help is not None:
return action.help.replace("[", r"\[")
return None
except ImportError: # pragma: no cover
_DEP_FILES = "`requirements.yaml` or `pyproject.toml`"
def _add_common_args( # noqa: PLR0912
sub_parser: argparse.ArgumentParser,
options: set[str],
) -> None: # pragma: no cover
if "directory" in options:
sub_parser.add_argument(
"-d",
"--directory",
type=Path,
default=".",
help=f"Base directory to scan for {_DEP_FILES} file(s), by default `.`",
)
if "file" in options:
sub_parser.add_argument(
"-f",
"--file",
type=Path,
default=".",
help=f"The {_DEP_FILES} file to parse, or folder"
" that contains that file, by default `.`",
)
if "verbose" in options:
sub_parser.add_argument(
"-v",
"--verbose",
action="store_true",
help="Print verbose output",
)
if "platform" in options:
current_platform = identify_current_platform()
sub_parser.add_argument(
"--platform",
"-p",
type=str,
action="append", # Allow multiple instances of -p
default=None, # Default is a list with the current platform set in `main`
choices=get_args(Platform),
help="The platform(s) to get the requirements for. "
"Multiple platforms can be specified. "
f"By default, the current platform (`{current_platform}`) is used.",
)
if "editable" in options:
sub_parser.add_argument(
"-e",
"--editable",
action="store_true",
help="Install the project in editable mode",
)
if "depth" in options:
sub_parser.add_argument(
"--depth",
type=int,
default=1,
help=f"Maximum depth to scan for {_DEP_FILES} files, by default 1",
)
if "*files" in options:
sub_parser.add_argument(
"files",
type=Path,
nargs="+",
help=f"The {_DEP_FILES} file(s) to parse"
" or folder(s) that contain"
" those file(s), by default `.`",
default=None, # default is "." set in `main`
)
if "skip-local" in options:
sub_parser.add_argument(
"--skip-local",
action="store_true",
help="Skip installing local dependencies",
)
if "skip-pip" in options:
sub_parser.add_argument(
"--skip-pip",
action="store_true",
help=f"Skip installing pip dependencies from {_DEP_FILES}",
)
if "skip-conda" in options:
sub_parser.add_argument(
"--skip-conda",
action="store_true",
help=f"Skip installing conda dependencies from {_DEP_FILES}",
)
if "skip-dependency" in options:
sub_parser.add_argument(
"--skip-dependency",
type=str,
action="append",
default=[],
help="Skip installing a specific dependency that is in one of the"
f" {_DEP_FILES}"
" files. This option can be used multiple times, each"
" time specifying a different package to skip."
" For example, use `--skip-dependency pandas` to skip installing pandas.",
)
if "no-dependencies" in options:
sub_parser.add_argument(
"--no-dependencies",
action="store_true",
help=f"Skip installing dependencies from {_DEP_FILES}"
" file(s) and only install local package(s). Useful after"
" installing a `conda-lock.yml` file because then all"
" dependencies have already been installed.",
)
if "conda-executable" in options:
sub_parser.add_argument(
"--conda-executable",
type=str,
choices=("conda", "mamba", "micromamba"),
help="The conda executable to use",
default=None,
)
if "dry-run" in options:
sub_parser.add_argument(
"--dry-run",
"--dry",
action="store_true",
help="Only print the commands that would be run",
)
if "ignore-pin" in options:
sub_parser.add_argument(
"--ignore-pin",
type=str,
action="append",
default=[],
help="Ignore the version pin for a specific package,"
" e.g., `--ignore-pin numpy`. This option can be repeated"
" to ignore multiple packages.",
)
if "overwrite-pin" in options:
sub_parser.add_argument(
"--overwrite-pin",
type=str,
action="append",
default=[],
help="Overwrite the version pin for a specific package,"
" e.g., `--overwrite-pin 'numpy==1.19.2'`. This option can be repeated"
" to overwrite the pins of multiple packages.",
)
def _parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(
description="Unified Conda and Pip requirements management.",
formatter_class=_HelpFormatter,
)
subparsers = parser.add_subparsers(dest="command", help="Subcommands")
# Subparser for the 'merge' command
merge_help = (
f"Combine multiple (or a single) {_DEP_FILES}"
" files into a"
" single Conda installable `environment.yaml` file."
)
merge_example = (
" Example usage: `unidep merge --directory . --depth 1 --output environment.yaml`" # noqa: E501
f" to search for {_DEP_FILES}"
" files in the current directory and its"
" subdirectories and create `environment.yaml`. These are the defaults, so you"
" can also just run `unidep merge`."
)
parser_merge = subparsers.add_parser(
"merge",
help=merge_help,
description=merge_help + merge_example,
formatter_class=_HelpFormatter,
)
parser_merge.add_argument(
"-o",
"--output",
type=Path,
default="environment.yaml",
help="Output file for the conda environment, by default `environment.yaml`",
)
parser_merge.add_argument(
"-n",
"--name",
type=str,
default="myenv",
help="Name of the conda environment, by default `myenv`",
)
parser_merge.add_argument(
"--stdout",
action="store_true",
help="Output to stdout instead of a file",
)
parser_merge.add_argument(
"--selector",
type=str,
choices=("sel", "comment"),
default="sel",
help="The selector to use for the environment markers, if `sel` then"
" `- numpy # [linux]` becomes `sel(linux): numpy`, if `comment` then"
" it remains `- numpy # [linux]`, by default `sel`",
)
_add_common_args(
parser_merge,
{
"directory",
"verbose",
"platform",
"depth",
"ignore-pin",
"skip-dependency",
"overwrite-pin",
},
)
# Subparser for the 'install' command
install_help = (
f"Automatically install all dependencies from one or more {_DEP_FILES} files."
" This command first installs dependencies"
" with Conda, then with Pip. Finally, it installs local packages"
f" (those containing the {_DEP_FILES} files)"
" using `pip install [-e] ./project`."
)
install_example = (
" Example usage: `unidep install .` for a single project."
" For multiple projects: `unidep install ./project1 ./project2`."
" The command accepts both file paths and directories containing"
f" a {_DEP_FILES} file. Use `--editable` or"
" `-e` to install the local packages in editable mode. See"
f" `unidep install-all` to install all {_DEP_FILES} files in and below the"
" current folder."
)
parser_install = subparsers.add_parser(
"install",
help=install_help,
description=install_help + install_example,
formatter_class=_HelpFormatter,
)
# Add positional argument for the file
_add_common_args(
parser_install,
{
"*files",
"conda-executable",
"dry-run",
"editable",
"skip-local",
"skip-pip",
"skip-conda",
"no-dependencies",
"ignore-pin",
"skip-dependency",
"overwrite-pin",
"verbose",
},
)
install_all_help = (
f"Install dependencies from all {_DEP_FILES}"
" files found in the current"
" directory or specified directory. This command first installs dependencies"
" using Conda, then Pip, and finally the local packages."
)
install_all_example = (
" Example usage: `unidep install-all` to install dependencies from all"
f" {_DEP_FILES}"
" files in the current directory. Use"
" `--directory ./path/to/dir` to specify a different directory. Use"
" `--depth` to control the depth of directory search. Add `--editable`"
" or `-e` for installing local packages in editable mode."
)
parser_install_all = subparsers.add_parser(
"install-all",
help=install_all_help,
description=install_all_help + install_all_example,
formatter_class=_HelpFormatter,
)
# Add positional argument for the file
_add_common_args(
parser_install_all,
{
"conda-executable",
"dry-run",
"editable",
"depth",
"directory",
"skip-local",
"skip-pip",
"skip-conda",
"no-dependencies",
"ignore-pin",
"skip-dependency",
"overwrite-pin",
"verbose",
},
)
# Subparser for the 'conda-lock' command
conda_lock_help = (
"Generate a global `conda-lock.yml` file for a collection of"
f" {_DEP_FILES}"
" files. Additionally, create individual"
f" `conda-lock.yml` files for each {_DEP_FILES} file"
" consistent with the global lock file."
)
conda_lock_example = (
" Example usage: `unidep conda-lock --directory ./projects` to generate"
f" conda-lock files for all {_DEP_FILES}"
" files in the `./projects`"
" directory. Use `--only-global` to generate only the global lock file."
" The `--check-input-hash` option can be used to avoid regenerating lock"
" files if the input hasn't changed."
)
parser_lock = subparsers.add_parser(
"conda-lock",
help=conda_lock_help,
description=conda_lock_help + conda_lock_example,
formatter_class=_HelpFormatter,
)
parser_lock.add_argument(
"--only-global",
action="store_true",
help="Only generate the global lock file",
)
parser_lock.add_argument(
"--lockfile",
type=Path,
default="conda-lock.yml",
help="Specify a path for the global lockfile (default: `conda-lock.yml`"
" in current directory). Path should be relative, e.g.,"
" `--lockfile ./locks/example.conda-lock.yml`.",
)
parser_lock.add_argument(
"--check-input-hash",
action="store_true",
help="Check existing input hashes in lockfiles before regenerating lock files."
" This flag is directly passed to `conda-lock`.",
)
_add_common_args(
parser_lock,
{
"directory",
"verbose",
"platform",
"depth",
"ignore-pin",
"skip-dependency",
"overwrite-pin",
},
)
# Subparser for the 'pip-compile' command
pip_compile_help = (
"Generate a fully pinned `requirements.txt` file from one or more"
f" {_DEP_FILES}"
" files using `pip-compile` from `pip-tools`. This"
f" command consolidates all pip dependencies defined in the {_DEP_FILES}"
" files and compiles them into a single `requirements.txt` file, taking"
" into account the specific versions and dependencies of each package."
)
pip_compile_example = (
" Example usage: `unidep pip-compile --directory ./projects` to generate"
f" a `requirements.txt` file for all {_DEP_FILES}"
" files in the"
" `./projects` directory. Use `--output-file requirements.txt` to specify a"
" different output file."
)
parser_pip_compile = subparsers.add_parser(
"pip-compile",
help=pip_compile_help,
description=pip_compile_help + pip_compile_example,
formatter_class=_HelpFormatter,
)
parser_pip_compile.add_argument(
"-o",
"--output-file",
type=Path,
default=None,
help="Output file for the pip requirements, by default `requirements.txt`",
)
_add_common_args(
parser_pip_compile,
{
"directory",
"verbose",
"platform",
"depth",
"ignore-pin",
"skip-dependency",
"overwrite-pin",
},
)
parser_pip_compile.add_argument(
"extra_flags",
nargs=argparse.REMAINDER,
help="Extra flags to pass to `pip-compile`. These flags are passed directly"
" and should be provided in the format expected by `pip-compile`. For example,"
" `unidep pip-compile -- --generate-hashes --allow-unsafe`. Note that the"
" `--` is required to separate the flags for `unidep` from the flags for"
" `pip-compile`.",
)
# Subparser for the 'pip' and 'conda' command
help_str = "Get the {} requirements for the current platform only."
help_example = (
" Example usage: `unidep {which} --file folder1 --file"
" folder2/requirements.yaml --seperator ' ' --platform linux-64` to"
" extract all the {which} dependencies specific to the linux-64 platform. Note"
" that the `--file` argument can be used multiple times to specify multiple"
f" {_DEP_FILES}"
" files and that --file can also be a folder that contains"
f" a {_DEP_FILES} file."
)
parser_pip = subparsers.add_parser(
"pip",
help=help_str.format("pip"),
description=help_str.format("pip") + help_example.format(which="pip"),
formatter_class=_HelpFormatter,
)
parser_conda = subparsers.add_parser(
"conda",
help=help_str.format("conda"),
description=help_str.format("conda") + help_example.format(which="conda"),
formatter_class=_HelpFormatter,
)
for sub_parser in [parser_pip, parser_conda]:
_add_common_args(
sub_parser,
{
"verbose",
"platform",
"file",
"ignore-pin",
"skip-dependency",
"overwrite-pin",
},
)
sub_parser.add_argument(
"--separator",
type=str,
default=" ",
help="The separator between the dependencies, by default ` `",
)
# Subparser for the 'version' command
parser_merge = subparsers.add_parser(
"version",
help="Print version information of unidep.",
formatter_class=_HelpFormatter,
)
args = parser.parse_args()
if args.command is None: # pragma: no cover
parser.print_help()
sys.exit(1)
if "file" in args and args.file.is_dir(): # pragma: no cover | args.file = dependencies_filename(args.file) | 12 | 2023-11-16 04:23:01+00:00 | 12k |
BAAI-DCAI/SegVol | inference_demo.py | [
{
"identifier": "sam_model_registry",
"path": "segment_anything_volumetric/build_sam.py",
"snippet": "def build_sam_vit_3d(args, checkpoint=None):\ndef _build_sam(\n image_encoder_type,\n embed_dim,\n patch_size,\n checkpoint,\n image_size,\n):"
},
{
"identifier": "SegVol",
"p... | import argparse
import os
import torch
import torch.nn.functional as F
import json
import monai.transforms as transforms
from segment_anything_volumetric import sam_model_registry
from network.model import SegVol
from data_process.demo_data_process import process_ct_gt
from utils.monai_inferers_utils import sliding_window_inference, generate_box, select_points, build_binary_cube, build_binary_points, logits2roi_coor
from utils.visualize import draw_result | 10,312 |
def set_parse():
# %% set up parser
parser = argparse.ArgumentParser()
parser.add_argument("--test_mode", default=True, type=bool)
parser.add_argument("--resume", type = str, default = '')
parser.add_argument("-infer_overlap", default=0.5, type=float, help="sliding window inference overlap")
parser.add_argument("-spatial_size", default=(32, 256, 256), type=tuple)
parser.add_argument("-patch_size", default=(4, 16, 16), type=tuple)
parser.add_argument('-work_dir', type=str, default='./work_dir')
### demo
parser.add_argument('--demo_config', type=str, required=True)
parser.add_argument("--clip_ckpt", type = str, default = './config/clip')
args = parser.parse_args()
return args
def dice_score(preds, labels): # on GPU
assert preds.shape[0] == labels.shape[0], "predict & target batch size don't match\n" + str(preds.shape) + str(labels.shape)
predict = preds.view(1, -1)
target = labels.view(1, -1)
if target.shape[1] < 1e8:
predict = predict.cuda()
target = target.cuda()
predict = torch.sigmoid(predict)
predict = torch.where(predict > 0.5, 1., 0.)
tp = torch.sum(torch.mul(predict, target))
den = torch.sum(predict) + torch.sum(target) + 1
dice = 2 * tp / den
if target.shape[1] < 1e8:
predict = predict.cpu()
target = target.cpu()
return dice
def zoom_in_zoom_out(args, segvol_model, image, image_resize, gt3D, gt3D_resize, categories=None):
logits_labels_record = {}
image_single_resize = image_resize
image_single = image[0,0]
ori_shape = image_single.shape
for item_idx in range(len(categories)):
# get label to generate prompts
label_single = gt3D[0][item_idx]
label_single_resize = gt3D_resize[0][item_idx]
# skip meaningless categories
if torch.sum(label_single) == 0:
print('No object, skip')
continue
# generate prompts
text_single = categories[item_idx] if args.use_text_prompt else None
if categories is not None: print(f'inference |{categories[item_idx]}| target...')
points_single = None
box_single = None
if args.use_point_prompt:
point, point_label = select_points(label_single_resize, num_positive_extra=3, num_negative_extra=3)
points_single = (point.unsqueeze(0).float().cuda(), point_label.unsqueeze(0).float().cuda())
binary_points_resize = build_binary_points(point, point_label, label_single_resize.shape)
if args.use_box_prompt:
|
def set_parse():
# %% set up parser
parser = argparse.ArgumentParser()
parser.add_argument("--test_mode", default=True, type=bool)
parser.add_argument("--resume", type = str, default = '')
parser.add_argument("-infer_overlap", default=0.5, type=float, help="sliding window inference overlap")
parser.add_argument("-spatial_size", default=(32, 256, 256), type=tuple)
parser.add_argument("-patch_size", default=(4, 16, 16), type=tuple)
parser.add_argument('-work_dir', type=str, default='./work_dir')
### demo
parser.add_argument('--demo_config', type=str, required=True)
parser.add_argument("--clip_ckpt", type = str, default = './config/clip')
args = parser.parse_args()
return args
def dice_score(preds, labels): # on GPU
assert preds.shape[0] == labels.shape[0], "predict & target batch size don't match\n" + str(preds.shape) + str(labels.shape)
predict = preds.view(1, -1)
target = labels.view(1, -1)
if target.shape[1] < 1e8:
predict = predict.cuda()
target = target.cuda()
predict = torch.sigmoid(predict)
predict = torch.where(predict > 0.5, 1., 0.)
tp = torch.sum(torch.mul(predict, target))
den = torch.sum(predict) + torch.sum(target) + 1
dice = 2 * tp / den
if target.shape[1] < 1e8:
predict = predict.cpu()
target = target.cpu()
return dice
def zoom_in_zoom_out(args, segvol_model, image, image_resize, gt3D, gt3D_resize, categories=None):
logits_labels_record = {}
image_single_resize = image_resize
image_single = image[0,0]
ori_shape = image_single.shape
for item_idx in range(len(categories)):
# get label to generate prompts
label_single = gt3D[0][item_idx]
label_single_resize = gt3D_resize[0][item_idx]
# skip meaningless categories
if torch.sum(label_single) == 0:
print('No object, skip')
continue
# generate prompts
text_single = categories[item_idx] if args.use_text_prompt else None
if categories is not None: print(f'inference |{categories[item_idx]}| target...')
points_single = None
box_single = None
if args.use_point_prompt:
point, point_label = select_points(label_single_resize, num_positive_extra=3, num_negative_extra=3)
points_single = (point.unsqueeze(0).float().cuda(), point_label.unsqueeze(0).float().cuda())
binary_points_resize = build_binary_points(point, point_label, label_single_resize.shape)
if args.use_box_prompt: | box_single = generate_box(label_single_resize).unsqueeze(0).float().cuda() | 4 | 2023-11-10 08:25:37+00:00 | 12k |
xk-huang/segment-caption-anything | tests/models/sca/test_modeling_sca.py | [
{
"identifier": "ScaConfig",
"path": "src/models/sca/configuration_sca.py",
"snippet": "class ScaConfig(PretrainedConfig):\n model_type = \"sca\"\n is_composition = True\n\n def __init__(\n self,\n vision_config=None,\n prompt_encoder_config=None,\n mask_caption_deco... | import sys
import pytest
import requests
import torch
import time
import numpy as np
import torch
import transformers
from PIL import Image
from src.models.sca import ScaConfig, ScaModel, ScaProcessor
from typing import Sequence
from torch.nn.utils.rnn import pad_sequence | 7,921 |
sys.path.append(".")
cache_dir = ".model.cache"
device = "cuda" if torch.cuda.is_available() else "cpu"
sam_model_name = "facebook/sam-vit-base"
text_model_name = "gpt2"
additional_num_hidden_layers = 2
@pytest.fixture
def model():
|
sys.path.append(".")
cache_dir = ".model.cache"
device = "cuda" if torch.cuda.is_available() else "cpu"
sam_model_name = "facebook/sam-vit-base"
text_model_name = "gpt2"
additional_num_hidden_layers = 2
@pytest.fixture
def model(): | model = ScaModel.from_sam_text_pretrained( | 2 | 2023-11-17 14:10:41+00:00 | 12k |
artwalker/EasyTranslator | easy_translator.py | [
{
"identifier": "CommandArgs",
"path": "command_args.py",
"snippet": "class CommandArgs:\r\n \"\"\"A class to read the arguments from command line .\"\"\"\r\n\r\n def __init__(self):\r\n \"\"\"Initialize command arguments.\"\"\"\r\n # Use the argparse module in the Python standard li... | from command_args import CommandArgs
from parameter_reader import ParameterReader
from process_file import ProcessFile
| 9,077 |
class TranslateText:
"""Overall class to manage text translation."""
def __init__(self):
"""Initialize parameters about the text translation."""
# 1. Read the command line arguments.
self.commandArgs = CommandArgs()
# 2. Read the parameters from the settings.cfg file and the .env file.
# and process the parameters.
self.parameterReader = ParameterReader(self.commandArgs)
# 3. Prepare to translate the text.
|
class TranslateText:
"""Overall class to manage text translation."""
def __init__(self):
"""Initialize parameters about the text translation."""
# 1. Read the command line arguments.
self.commandArgs = CommandArgs()
# 2. Read the parameters from the settings.cfg file and the .env file.
# and process the parameters.
self.parameterReader = ParameterReader(self.commandArgs)
# 3. Prepare to translate the text.
| self.processFile = ProcessFile(self.parameterReader)
| 2 | 2023-11-10 15:56:06+00:00 | 12k |
ShipBit/wingman-ai | main.py | [
{
"identifier": "AudioRecorder",
"path": "services/audio_recorder.py",
"snippet": "class AudioRecorder(FileCreator):\n def __init__(\n self,\n app_root_dir: str,\n samplerate: int = 44100,\n channels: int = 1,\n ):\n super().__init__(app_root_dir, RECORDING_PATH)... | from os import path
from pynput import keyboard
from services.audio_recorder import AudioRecorder
from services.secret_keeper import SecretKeeper
from services.tower import Tower
from services.printr import Printr
from services.config_manager import ConfigManager
from gui.root import WingmanUI
from wingmen.wingman import Wingman
import sys
import asyncio
import threading | 10,051 |
printr = Printr()
def get_application_root(is_bundled: bool):
if is_bundled:
application_path = sys._MEIPASS
else:
application_path = path.dirname(path.abspath(__file__))
return application_path
class WingmanAI:
def __init__(self):
# pyinstaller things...
self.app_is_bundled = getattr(sys, "frozen", False)
self.app_root_dir = get_application_root(self.app_is_bundled)
self.active = False
self.active_recording = {"key": "", "wingman": None}
self.tower = None
self.config_manager = ConfigManager(self.app_root_dir, self.app_is_bundled)
self.secret_keeper = SecretKeeper(self.app_root_dir)
self.audio_recorder = AudioRecorder(self.app_root_dir)
def load_context(self, context=""):
self.active = False
try:
if self.config_manager:
config = self.config_manager.get_context_config(context)
self.tower = Tower(
config=config,
secret_keeper=self.secret_keeper,
app_root_dir=self.app_root_dir,
)
except FileNotFoundError:
printr.print_err(f"Could not find context.{context}.yaml", True)
except Exception as e:
# Everything else...
printr.print_err(str(e), True)
def activate(self):
if self.tower:
self.active = True
def deactivate(self):
self.active = False
def on_press(self, key):
if self.active and self.tower and self.active_recording["key"] == "":
wingman = self.tower.get_wingman_from_key(key)
if wingman:
self.active_recording = dict(key=key, wingman=wingman)
self.audio_recorder.start_recording()
def on_release(self, key):
if self.active and self.active_recording["key"] == key:
wingman = self.active_recording["wingman"]
recorded_audio_wav = self.audio_recorder.stop_recording()
self.active_recording = dict(key="", wingman=None)
def run_async_process():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
|
printr = Printr()
def get_application_root(is_bundled: bool):
if is_bundled:
application_path = sys._MEIPASS
else:
application_path = path.dirname(path.abspath(__file__))
return application_path
class WingmanAI:
def __init__(self):
# pyinstaller things...
self.app_is_bundled = getattr(sys, "frozen", False)
self.app_root_dir = get_application_root(self.app_is_bundled)
self.active = False
self.active_recording = {"key": "", "wingman": None}
self.tower = None
self.config_manager = ConfigManager(self.app_root_dir, self.app_is_bundled)
self.secret_keeper = SecretKeeper(self.app_root_dir)
self.audio_recorder = AudioRecorder(self.app_root_dir)
def load_context(self, context=""):
self.active = False
try:
if self.config_manager:
config = self.config_manager.get_context_config(context)
self.tower = Tower(
config=config,
secret_keeper=self.secret_keeper,
app_root_dir=self.app_root_dir,
)
except FileNotFoundError:
printr.print_err(f"Could not find context.{context}.yaml", True)
except Exception as e:
# Everything else...
printr.print_err(str(e), True)
def activate(self):
if self.tower:
self.active = True
def deactivate(self):
self.active = False
def on_press(self, key):
if self.active and self.tower and self.active_recording["key"] == "":
wingman = self.tower.get_wingman_from_key(key)
if wingman:
self.active_recording = dict(key=key, wingman=wingman)
self.audio_recorder.start_recording()
def on_release(self, key):
if self.active and self.active_recording["key"] == key:
wingman = self.active_recording["wingman"]
recorded_audio_wav = self.audio_recorder.stop_recording()
self.active_recording = dict(key="", wingman=None)
def run_async_process():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try: | if isinstance(wingman, Wingman): | 6 | 2023-11-15 09:36:06+00:00 | 12k |
derkalle4/python3-idotmatrix-client | core/cmd.py | [
{
"identifier": "Bluetooth",
"path": "core/bluetooth.py",
"snippet": "class Bluetooth:\n address = None\n client = None\n logging = logging.getLogger(\"idotmatrix.\" + __name__)\n mtu_size = None\n\n def __init__(self, address):\n self.logging.debug(\"initialize bluetooth for {}\".... | from datetime import datetime
from PIL import Image
from .bluetooth import Bluetooth
from .idotmatrix.chronograph import Chronograph
from .idotmatrix.clock import Clock
from .idotmatrix.common import Common
from .idotmatrix.countdown import Countdown
from .idotmatrix.gif import Gif
from .idotmatrix.image import Image
from .idotmatrix.fullscreenColor import FullscreenColor
from .idotmatrix.musicSync import MusicSync
from .idotmatrix.scoreboard import Scoreboard
from .idotmatrix.graffiti import Graffiti
import logging
import os
import time | 8,275 | "--pixel-color",
action="append",
help="sets a pixel to a specific color. Could be used multiple times. Format: <PIXEL-X>-<PIXEL-Y>-<R0-255>-<G0-255>-<B0-255> (example: 0-0-255-255-255)",
nargs="+",
)
# scoreboard
parser.add_argument(
"--scoreboard",
action="store",
help="shows the scoreboard with the given scores. Format: <0-999>-<0-999>",
)
# image upload
parser.add_argument(
"--image",
action="store",
help="enables or disables the image mode (true = enable, false = disable)",
)
parser.add_argument(
"--set-image",
action="store",
help="uploads a given image file (fastest is png, max. pixel depending on your display). Format: ./path/to/image.png",
)
parser.add_argument(
"--process-image",
action="store",
help="processes the image instead of sending it raw (useful when the size does not match or it is not a png). Format: <AMOUNT_PIXEL>",
)
# gif upload
parser.add_argument(
"--set-gif",
action="store",
help="uploads a given gif file (pixel depending on your display). Format: ./path/to/image.gif",
)
parser.add_argument(
"--process-gif",
action="store",
help="processes the gif instead of sending it raw (useful when the size does not match). Format: <AMOUNT_PIXEL>",
)
    async def run(self, args):
        """Entry point: resolve the device address, connect, and execute the CLI.

        The address is taken from --address if given, otherwise from the
        IDOTMATRIX_ADDRESS environment variable; without one the process exits.

        Args:
            args: parsed argparse.Namespace produced by add_arguments().
        """
        self.logging.info("initializing command line")
        address = None
        if args.address:
            self.logging.debug("using --address")
            address = args.address
        elif "IDOTMATRIX_ADDRESS" in os.environ:
            self.logging.debug("using IDOTMATRIX_ADDRESS")
            address = os.environ["IDOTMATRIX_ADDRESS"]
        if address is None:
            self.logging.error("no device address given")
            # NOTE(review): quit() is the site-module REPL helper and exits with
            # status 0; consider `raise SystemExit(1)` for script use.
            quit()
        else:
            self.bluetooth = Bluetooth(address)
        # arguments which can be run in parallel
        if args.sync_time:
            await self.sync_time(args.set_time)
        if args.rotate180degrees:
            await self.rotate180degrees(args.rotate180degrees)
        if args.togglescreen:
            await self.togglescreen()
        if args.set_brightness:
            await self.set_brightness(args.set_brightness)
        if args.set_password:
            await self.set_password(args.set_password)
        # arguments which cannot run in parallel (mutually exclusive modes)
        if args.test:
            await self.test()
        elif args.chronograph:
            await self.chronograph(args.chronograph)
        elif args.clock:
            await self.clock(args)
        elif args.countdown:
            await self.countdown(args)
        elif args.fullscreen_color:
            await self.fullscreenColor(args.fullscreen_color)
        elif args.pixel_color:
            await self.pixelColor(args.pixel_color)
        elif args.scoreboard:
            await self.scoreboard(args.scoreboard)
        elif args.image:
            await self.image(args)
        elif args.set_gif:
            await self.gif(args)
async def test(self):
"""Tests all available options for the device"""
self.logging.info("starting test of device")
## chronograph
await self.bluetooth.send(Chronograph().setChronograph(1))
time.sleep(5)
await self.bluetooth.send(Chronograph().setChronograph(0))
time.sleep(1)
## clock
await self.bluetooth.send(Clock().setTimeIndicator(True))
await self.bluetooth.send(Clock().setClockMode(0, True, True))
time.sleep(5)
## countdown
await self.bluetooth.send(Countdown().setCountdown(1, 0, 5))
await self.bluetooth.send(Countdown().setCountdown(0, 0, 5))
time.sleep(5)
## fullscreen color
await self.bluetooth.send(FullscreenColor().setColor(255, 0, 0))
time.sleep(5)
## scoreboard
await self.bluetooth.send(Scoreboard().setScoreboard(1, 0))
time.sleep(1)
await self.bluetooth.send(Scoreboard().setScoreboard(1, 1))
time.sleep(1)
await self.bluetooth.send(Scoreboard().setScoreboard(1, 2))
time.sleep(1)
await self.bluetooth.send(Scoreboard().setScoreboard(2, 2))
## graffiti
# load graffiti board and color pixel 0,0 red
await self.bluetooth.send(Graffiti().setPixelColor(255, 0, 0, 0, 0))
# load graffitti board and color pixel 1,1 green
await self.bluetooth.send(Graffiti().setPixelColor(0, 255, 0, 1, 1))
# load graffitti board and color pixel 2,2 blue
await self.bluetooth.send(Graffiti().setPixelColor(0, 0, 255, 2, 2))
time.sleep(5)
## diy image (png)
| # python imports
# idotmatrix imports
class CMD:
bluetooth = None
logging = logging.getLogger("idotmatrix." + __name__)
    def add_arguments(self, parser):
        """Register every supported CLI option on *parser*.

        Options fall into device settings (time, rotation, brightness,
        password), display modes (chronograph, clock, countdown, colors,
        scoreboard) and media uploads (image, gif).

        Args:
            parser: an argparse.ArgumentParser to extend in place.
        """
        # test
        parser.add_argument(
            "--test",
            action="store_true",
            help="run the test function from the command line class",
        )
        # time sync
        parser.add_argument(
            "--sync-time",
            action="store_true",
            help="sync time to device",
        )
        parser.add_argument(
            "--set-time",
            action="store",
            help="optionally set time to sync to device (use with --sync-time)",
            default=datetime.now().strftime("%d-%m-%Y-%H:%M:%S"),
        )
        # device screen rotation
        parser.add_argument(
            "--rotate180degrees",
            action="store",
            help="enable 180 degree device rotation (true = enable, false = disable)",
        )
        # screen toggle
        parser.add_argument(
            "--togglescreen",
            action="store_true",
            help="toggles the screen on or off",
        )
        # brightness
        parser.add_argument(
            "--set-brightness",
            action="store",
            help="sets the brightness of the screen in percent: range 5..100",
        )
        # password
        parser.add_argument(
            "--set-password",
            action="store",
            help="sets password",
        )
        # chronograph
        parser.add_argument(
            "--chronograph",
            action="store",
            help="sets the chronograph mode: 0 = reset, 1 = (re)start, 2 = pause, 3 = continue after pause",
        )
        # clock
        parser.add_argument(
            "--clock",
            action="store",
            help="sets the clock mode: 0 = default, 1 = christmas, 2 = racing, 3 = inverted full screen, 4 = animated hourglass, 5 = frame 1, 6 = frame 2, 7 = frame 3",
        )
        parser.add_argument(
            "--clock-with-date",
            action="store_true",
            help="shows the current date in addition to the current time.",
        )
        parser.add_argument(
            "--clock-24h",
            action="store_true",
            help="shows the current time in 24h format.",
        )
        parser.add_argument(
            "--clock-color",
            action="store",
            help="sets the color of the clock. Format: <R0-255>-<G0-255>-<B0-255> (example: 255-255-255)",
            default="255-255-255",
        )
        # countdown
        parser.add_argument(
            "--countdown",
            action="store",
            help="sets the countdown mode: 0 = disable, 1 = start, 2 = pause, 3 = restart",
        )
        parser.add_argument(
            "--countdown-time",
            action="store",
            help="sets the countdown mode: <MINUTES>-<SECONDS> (example: 10-30)",
            default="5-0",
        )
        # fullscreen color
        parser.add_argument(
            "--fullscreen-color",
            action="store",
            help="sets a fullscreen color. Format: <R0-255>-<G0-255>-<B0-255> (example: 255-255-255)",
        )
        # pixel color
        parser.add_argument(
            "--pixel-color",
            action="append",
            help="sets a pixel to a specific color. Could be used multiple times. Format: <PIXEL-X>-<PIXEL-Y>-<R0-255>-<G0-255>-<B0-255> (example: 0-0-255-255-255)",
            nargs="+",
        )
        # scoreboard
        parser.add_argument(
            "--scoreboard",
            action="store",
            help="shows the scoreboard with the given scores. Format: <0-999>-<0-999>",
        )
        # image upload
        parser.add_argument(
            "--image",
            action="store",
            help="enables or disables the image mode (true = enable, false = disable)",
        )
        parser.add_argument(
            "--set-image",
            action="store",
            help="uploads a given image file (fastest is png, max. pixel depending on your display). Format: ./path/to/image.png",
        )
        parser.add_argument(
            "--process-image",
            action="store",
            help="processes the image instead of sending it raw (useful when the size does not match or it is not a png). Format: <AMOUNT_PIXEL>",
        )
        # gif upload
        parser.add_argument(
            "--set-gif",
            action="store",
            help="uploads a given gif file (pixel depending on your display). Format: ./path/to/image.gif",
        )
        parser.add_argument(
            "--process-gif",
            action="store",
            help="processes the gif instead of sending it raw (useful when the size does not match). Format: <AMOUNT_PIXEL>",
        )
async def run(self, args):
self.logging.info("initializing command line")
address = None
if args.address:
self.logging.debug("using --address")
address = args.address
elif "IDOTMATRIX_ADDRESS" in os.environ:
self.logging.debug("using IDOTMATRIX_ADDRESS")
address = os.environ["IDOTMATRIX_ADDRESS"]
if address is None:
self.logging.error("no device address given")
quit()
else:
self.bluetooth = Bluetooth(address)
# arguments which can be run in parallel
if args.sync_time:
await self.sync_time(args.set_time)
if args.rotate180degrees:
await self.rotate180degrees(args.rotate180degrees)
if args.togglescreen:
await self.togglescreen()
if args.set_brightness:
await self.set_brightness(args.set_brightness)
if args.set_password:
await self.set_password(args.set_password)
# arguments which cannot run in parallel
if args.test:
await self.test()
elif args.chronograph:
await self.chronograph(args.chronograph)
elif args.clock:
await self.clock(args)
elif args.countdown:
await self.countdown(args)
elif args.fullscreen_color:
await self.fullscreenColor(args.fullscreen_color)
elif args.pixel_color:
await self.pixelColor(args.pixel_color)
elif args.scoreboard:
await self.scoreboard(args.scoreboard)
elif args.image:
await self.image(args)
elif args.set_gif:
await self.gif(args)
async def test(self):
"""Tests all available options for the device"""
self.logging.info("starting test of device")
## chronograph
await self.bluetooth.send(Chronograph().setChronograph(1))
time.sleep(5)
await self.bluetooth.send(Chronograph().setChronograph(0))
time.sleep(1)
## clock
await self.bluetooth.send(Clock().setTimeIndicator(True))
await self.bluetooth.send(Clock().setClockMode(0, True, True))
time.sleep(5)
## countdown
await self.bluetooth.send(Countdown().setCountdown(1, 0, 5))
await self.bluetooth.send(Countdown().setCountdown(0, 0, 5))
time.sleep(5)
## fullscreen color
await self.bluetooth.send(FullscreenColor().setColor(255, 0, 0))
time.sleep(5)
## scoreboard
await self.bluetooth.send(Scoreboard().setScoreboard(1, 0))
time.sleep(1)
await self.bluetooth.send(Scoreboard().setScoreboard(1, 1))
time.sleep(1)
await self.bluetooth.send(Scoreboard().setScoreboard(1, 2))
time.sleep(1)
await self.bluetooth.send(Scoreboard().setScoreboard(2, 2))
## graffiti
# load graffiti board and color pixel 0,0 red
await self.bluetooth.send(Graffiti().setPixelColor(255, 0, 0, 0, 0))
# load graffitti board and color pixel 1,1 green
await self.bluetooth.send(Graffiti().setPixelColor(0, 255, 0, 1, 1))
# load graffitti board and color pixel 2,2 blue
await self.bluetooth.send(Graffiti().setPixelColor(0, 0, 255, 2, 2))
time.sleep(5)
## diy image (png) | await self.bluetooth.send(Image().show(1)) | 6 | 2023-11-13 14:04:21+00:00 | 12k |
AdmTal/music-graphs | src/generate_music_graph.py | [
{
"identifier": "AnimationFrames",
"path": "src/animation_stuff.py",
"snippet": "class AnimationFrames:\n \"\"\"\n Helper object to organize layered frames in order to produce an animation.\n self._data is a Dict. The Keys are \"layer_ids\", and the values are Lists of \"images\".\n They ar... | import os
import click
import psutil
from graphviz import Graph
from hurry.filesize import size
from concurrent.futures import ThreadPoolExecutor, as_completed
from src.animation_stuff import AnimationFrames
from src.cache_stuff import (
cleanup_cache_dir,
get_cache_dir,
)
from src.graph_stuff import (
animate_bezier_point,
animate_ellipsis_blur,
draw_fading_bezier_curve,
parse_graph,
get_node_positions,
)
from src.midi_stuff import (
get_note_start_times_in_frames,
TRACK_NOTE_DELIMITER,
)
from src.theme_stuff import Theme
from src.video_stuff import (
add_frame_to_video,
finalize_video_with_music,
initialize_video_writer,
) | 8,024 | # For each individual chord, draw the lines
for frame_len, all_notes in notes_in_cords.items():
# The chord lines shoudl not overlap, so sort them according to sort order
if theme.nodes_sorted:
if isinstance(theme.nodes_sorted, bool):
all_notes = sorted(
all_notes,
key=lambda i: int(i.split(TRACK_NOTE_DELIMITER)[1]),
)
else:
all_notes = filter_and_order_custom(
theme.nodes_sorted, all_notes
)
# Use `overlapping_pairs` to make the notes connect as a circle
pairs = overlapping_pairs(all_notes)
for a, b in pairs:
frames = []
for i in range(frame_len):
if b not in edges[a]:
continue
frames.append(
[
draw_fading_bezier_curve,
{
"track": track,
"points": edges[a][b].b_points,
"frame_number": i,
"animation_len": frame_len,
},
]
)
FRAMES.add_frames_to_layer(
f"l1-{track}-{a}-{b}-line", curr_frame, frames
)
curr_notes = [curr_note_tuple[0] for curr_note_tuple in curr_note_tuples]
# Animate the "next note" balls
if prev_notes:
animation_length_in_frames = curr_frame - prev_notes_frame
drawn_to = set()
source_usage = {note: 0 for note in prev_notes}
# New Rule: Check if there are more destinations than sources to determine max usage
max_usage = 2 if len(curr_notes) > len(prev_notes) else 1
if animation_length_in_frames / theme.frame_rate <= 10:
for a in prev_notes:
for b in curr_notes:
if (
b in drawn_to
or (a == b and not theme.allow_self_notes(track))
or source_usage[a] >= max_usage
or b not in edges[a]
):
continue
frames = []
for i in range(animation_length_in_frames):
frame = [
animate_bezier_point,
{
"track": track,
"points": edges[a][b].b_points,
"frame_number": i,
"animation_length_in_frames": animation_length_in_frames,
},
]
frames.append(frame)
FRAMES.add_frames_to_layer(
f"l3-{track}-{a}-{b}-balls", prev_notes_frame, frames
)
drawn_to.add(b)
source_usage[a] += 1
prev_notes = curr_notes
prev_notes_frame = curr_frame
num_frames = len(FRAMES)
if theme.debug_max_frames:
num_frames = theme.debug_max_frames
writer_context = initialize_video_writer(theme.frame_rate)
frames_written = 0
click.echo("\nDrawing frames, writing videos...")
NUM_WORKERS = os.cpu_count()
with writer_context as (writer, video_file_path):
while frames_written < num_frames:
with ThreadPoolExecutor(max_workers=NUM_WORKERS) as executor:
future_to_frame = {
executor.submit(
process_frame,
current_frame=i,
base_image=base_image.copy(),
theme=theme,
offsets=offsets,
FRAMES=FRAMES,
): i
for i in range(
frames_written, min(frames_written + NUM_WORKERS, num_frames)
)
}
results = []
for future in as_completed(future_to_frame):
frame_index = future_to_frame[future]
frame_image = future.result()
results.append((frame_index, frame_image))
for frame_index, frame_image in sorted(results, key=lambda x: x[0]):
add_frame_to_video(writer, frame_image)
frames_written += 1
usage = size(psutil.Process().memory_info().rss)
click.echo(
f"\rProcessed {frames_written} of {num_frames}... (memory usage={usage})",
nl=False,
)
|
def midi_note_to_pitch_class(midi_note):
    """Translate a '<track><sep><note>' identifier into its pitch-class name.

    The note number is mapped onto the chromatic scale with this project's
    1-based note convention, so e.g. note 1 -> "C".
    """
    _track, note_str = midi_note.split(TRACK_NOTE_DELIMITER)
    pitch_names = ["C", "Db", "D", "Eb", "E", "F", "F#", "G", "Ab", "A", "Bb", "B"]
    return pitch_names[(int(note_str) - 1) % 12]
def overlapping_pairs(lst):
    """Return consecutive pairs of *lst*, wrapping the last item back to the
    first; an empty list when there are fewer than two items."""
    if len(lst) < 2:
        return []
    pairs = [(lst[i], lst[i + 1]) for i in range(len(lst) - 1)]
    pairs.append((lst[-1], lst[0]))
    return pairs
def create_graphviz_default_sort(theme, track_events_frames):
    """Build the song graph, letting Graphviz choose the node order."""
    graph = Graph(
        "G",
        engine=theme.graphviz_engine,
        format="xdot",
        strict=True,
        node_attr=theme.graphviz_node_attrs,
        graph_attr=theme.graphviz_graph_attrs,
        edge_attr=theme.graphviz_edge_attrs,
    )
    for track, frames_to_notes in track_events_frames.items():
        if theme.skip_track(track):
            continue
        # One group per frame: the note ids that start on that frame.
        note_groups = [
            [event[0] for event in events] for events in frames_to_notes.values()
        ]
        # Register a node for the first note of every group.
        for group in note_groups:
            first = group[0]
            graph.node(first, label=midi_note_to_pitch_class(first))
        # Connect consecutive groups (wrapping around) all-to-all.
        for src_group, dst_group in overlapping_pairs(note_groups):
            for src in src_group:
                for dst in dst_group:
                    graph.node(dst, label=midi_note_to_pitch_class(dst))
                    graph.edge(src, dst)
    return graph
def filter_and_order_custom(reference_list, input_list):
    """Reorder *input_list* to follow the note numbers in *reference_list*.

    Entries whose note number does not appear in *reference_list* are
    dropped; on duplicate numbers the last entry in *input_list* wins.
    """
    by_number = {}
    for item in input_list:
        number = int(item.split(TRACK_NOTE_DELIMITER)[1])
        by_number[number] = item
    return [by_number[num] for num in reference_list if num in by_number]
def create_graphviz_sorted(theme, track_events_frames):
    """
    This function implements a hack to force Graphviz node ordering.
    Step 1: Create a bare-bones CIRCO graph with nodes added in order
    Step 2: Save that graph to a file, and extract its node positions
    Step 3: Generate the final NEATO graph, using hard coded node positions
    """
    # The position-extraction trick relies on circo's circular layout.
    if theme.graphviz_engine.lower() != "circo":
        click.echo(
            "ERROR: Node sorting only works when graphviz engine is circo", err=True
        )
        cleanup_cache_dir(get_cache_dir())
        exit(1)
    # Step 1: bare-bones circo graph; "plain" output so positions can be parsed.
    song_graph = Graph(
        "G",
        engine=theme.graphviz_engine,
        format="plain",
        strict=True,
        node_attr=theme.graphviz_node_attrs,
        graph_attr=theme.graphviz_graph_attrs,
        edge_attr=theme.graphviz_edge_attrs,
    )
    # Collect every distinct note id (dict keys preserve first-seen order).
    all_notes = {}
    for track, note_tuples in track_events_frames.items():
        if theme.skip_track(track):
            continue
        notes = [
            [note_tuple[0] for note_tuple in list_of_note_tuples]
            for frame_num, list_of_note_tuples in note_tuples.items()
        ]
        for note in notes:
            all_notes[note[0]] = True
    # Create Nodes - In order
    prev_note = None
    all_notes = list(all_notes.keys())
    if theme.nodes_sorted:
        if isinstance(theme.nodes_sorted, bool):
            # nodes_sorted == True: sort numerically by note number.
            all_notes = sorted(
                all_notes, key=lambda i: int(i.split(TRACK_NOTE_DELIMITER)[1])
            )
        else:
            # nodes_sorted is an explicit ordering list from the theme.
            all_notes = filter_and_order_custom(theme.nodes_sorted, all_notes)
    for n in all_notes + [all_notes[0]]:  # tack on the first to make a circle
        song_graph.node(n, label=midi_note_to_pitch_class(n))
        if prev_note:
            song_graph.edge(n, prev_note)
        prev_note = n
    # Step 2: let circo lay the ring out, then harvest the node coordinates.
    node_positions = get_node_positions(song_graph)
    # Step 3: rebuild with neato, pinning every node to its harvested position.
    song_graph = Graph(
        "G",
        engine="neato",
        format="xdot",
        strict=True,
        node_attr=theme.graphviz_node_attrs,
        graph_attr=theme.graphviz_graph_attrs,
        edge_attr=theme.graphviz_edge_attrs,
    )
    for track, note_tuples in track_events_frames.items():
        if theme.skip_track(track):
            continue
        notes = [
            [note_tuple[0] for note_tuple in list_of_note_tuples]
            for frame_num, list_of_note_tuples in note_tuples.items()
        ]
        # Create Nodes
        for note in notes:
            n = note[0]
            song_graph.node(
                n,
                label=midi_note_to_pitch_class(n),
                _attributes={"pos": node_positions[n]},
            )
        # Create Edges
        melody_pairs = overlapping_pairs(notes)
        for a_notes, b_notes in melody_pairs:
            for a in a_notes:
                for b in b_notes:
                    song_graph.node(b, label=midi_note_to_pitch_class(b))
                    song_graph.edge(a, b)
    return song_graph
def create_graphviz(theme, track_events_frames):
    """Dispatch to the sorted or default graph builder based on the theme."""
    builder = (
        create_graphviz_sorted if theme.nodes_sorted else create_graphviz_default_sort
    )
    return builder(theme, track_events_frames)
def process_frame(current_frame, base_image, theme, offsets, FRAMES):
    """Compose one output frame by applying every layer's draw call in order.

    Layers are drawn in sorted key order (l1 lines, l2 pulses, l3 balls);
    empty layer slots are skipped. The base image is copied, never mutated.
    """
    composed = base_image.copy()
    for _layer_name, layer_frames in sorted(FRAMES.items()):
        entry = layer_frames[current_frame]
        if not entry:
            continue
        draw_function, draw_kwargs = entry
        composed = draw_function(
            base_image=composed,
            theme=theme,
            offsets=offsets,
            **draw_kwargs,
        )
    return composed
def generate_music_graph(
midi_file_path,
default_theme_file_path,
theme_file_path,
output_path,
soundfont_file,
):
theme = Theme(theme_file_path, default_theme_file_path)
track_events_frames = get_note_start_times_in_frames(
midi_file_path,
theme.frame_rate,
squash_tracks=theme.squash_tracks,
group_notes_by_track=theme.group_notes_by_track,
)
song_graph = create_graphviz(theme, track_events_frames)
base_image, nodes, edges, offsets = parse_graph(song_graph, theme)
if theme.debug_show_base_image:
base_image.show()
cleanup_cache_dir(get_cache_dir())
exit()
FRAMES = AnimationFrames()
click.echo("Planning out frames...", nl=False)
for track in track_events_frames.keys():
if theme.skip_track(track):
continue
curr_track = track_events_frames[track]
curr_frame = min(curr_track) - 1
prev_notes = None
prev_notes_frame = None
num_notes_processed = 0
click.echo() # NL
max_notes = len(track_events_frames[track])
while num_notes_processed < max_notes and curr_frame <= max(curr_track):
curr_frame += 1
if curr_frame not in curr_track:
continue
usage = size(psutil.Process().memory_info().rss)
click.echo(
f"\r[{track}] Processing {num_notes_processed + 1} of {max_notes} notes... (memory usage={usage})",
nl=False,
)
num_notes_processed += 1
curr_note_tuples = curr_track[curr_frame]
# Animate the Node pulses
for (
current_note,
curr_note_velocity,
curr_note_frame_len,
) in curr_note_tuples:
frames = []
for i in range(curr_note_frame_len):
frame = [
animate_ellipsis_blur,
{
"track": track,
"points": nodes[current_note].e_points,
"frame_number": i,
"animation_len": curr_note_frame_len,
"velocity": curr_note_velocity,
},
]
frames.append(frame)
FRAMES.add_frames_to_layer(
f"l2-{track}-{current_note}", curr_frame, frames
)
if theme.pulses_only:
continue
# Animate the Chord Lines
if len(curr_note_tuples) > 1:
# Split notes in chord up by the frame length, cause multiple chords might be playing
notes_in_cords = {}
for note, velocity, frame_len in curr_note_tuples:
if frame_len not in notes_in_cords:
notes_in_cords[frame_len] = []
notes_in_cords[frame_len].append(note)
# For each individual chord, draw the lines
for frame_len, all_notes in notes_in_cords.items():
# The chord lines shoudl not overlap, so sort them according to sort order
if theme.nodes_sorted:
if isinstance(theme.nodes_sorted, bool):
all_notes = sorted(
all_notes,
key=lambda i: int(i.split(TRACK_NOTE_DELIMITER)[1]),
)
else:
all_notes = filter_and_order_custom(
theme.nodes_sorted, all_notes
)
# Use `overlapping_pairs` to make the notes connect as a circle
pairs = overlapping_pairs(all_notes)
for a, b in pairs:
frames = []
for i in range(frame_len):
if b not in edges[a]:
continue
frames.append(
[
draw_fading_bezier_curve,
{
"track": track,
"points": edges[a][b].b_points,
"frame_number": i,
"animation_len": frame_len,
},
]
)
FRAMES.add_frames_to_layer(
f"l1-{track}-{a}-{b}-line", curr_frame, frames
)
curr_notes = [curr_note_tuple[0] for curr_note_tuple in curr_note_tuples]
# Animate the "next note" balls
if prev_notes:
animation_length_in_frames = curr_frame - prev_notes_frame
drawn_to = set()
source_usage = {note: 0 for note in prev_notes}
# New Rule: Check if there are more destinations than sources to determine max usage
max_usage = 2 if len(curr_notes) > len(prev_notes) else 1
if animation_length_in_frames / theme.frame_rate <= 10:
for a in prev_notes:
for b in curr_notes:
if (
b in drawn_to
or (a == b and not theme.allow_self_notes(track))
or source_usage[a] >= max_usage
or b not in edges[a]
):
continue
frames = []
for i in range(animation_length_in_frames):
frame = [
animate_bezier_point,
{
"track": track,
"points": edges[a][b].b_points,
"frame_number": i,
"animation_length_in_frames": animation_length_in_frames,
},
]
frames.append(frame)
FRAMES.add_frames_to_layer(
f"l3-{track}-{a}-{b}-balls", prev_notes_frame, frames
)
drawn_to.add(b)
source_usage[a] += 1
prev_notes = curr_notes
prev_notes_frame = curr_frame
num_frames = len(FRAMES)
if theme.debug_max_frames:
num_frames = theme.debug_max_frames
writer_context = initialize_video_writer(theme.frame_rate)
frames_written = 0
click.echo("\nDrawing frames, writing videos...")
NUM_WORKERS = os.cpu_count()
with writer_context as (writer, video_file_path):
while frames_written < num_frames:
with ThreadPoolExecutor(max_workers=NUM_WORKERS) as executor:
future_to_frame = {
executor.submit(
process_frame,
current_frame=i,
base_image=base_image.copy(),
theme=theme,
offsets=offsets,
FRAMES=FRAMES,
): i
for i in range(
frames_written, min(frames_written + NUM_WORKERS, num_frames)
)
}
results = []
for future in as_completed(future_to_frame):
frame_index = future_to_frame[future]
frame_image = future.result()
results.append((frame_index, frame_image))
for frame_index, frame_image in sorted(results, key=lambda x: x[0]):
add_frame_to_video(writer, frame_image)
frames_written += 1
usage = size(psutil.Process().memory_info().rss)
click.echo(
f"\rProcessed {frames_written} of {num_frames}... (memory usage={usage})",
nl=False,
)
| finalize_video_with_music( | 12 | 2023-11-17 17:56:04+00:00 | 12k |
Subsets and Splits
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that have consistent code formatting levels across multiple scales (2k, 4k, 8k, 12k) and reveals the structured formatting patterns within these repositories.
SQL Console for tianyang/repobench_python_v1.1
Compares cross-file and in-file code structure patterns across different complexity levels, revealing how file organization strategies vary with code size and potentially informing better code architecture decisions.
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that have complete performance data across all seven code complexity levels, revealing consistent benchmarking patterns across different code sizes.
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that contain all 7 distinct quality levels (2k through 32k), revealing complete datasets that might be useful for comprehensive analysis.