repo_name stringlengths 7 71 | file_path stringlengths 5 118 | context list | import_statement stringlengths 45 12.5k | token_num int64 641 99.4k | cropped_code stringlengths 44 17k | all_code stringlengths 43 754k | next_line stringlengths 2 330 | gold_snippet_index int64 0 68 | created_at stringlengths 25 25 | level stringclasses 9
values |
|---|---|---|---|---|---|---|---|---|---|---|
dazhangyu123/ACMIL | Step3_WSI_classification.py | [
{
"identifier": "save_model",
"path": "utils/utils.py",
"snippet": "def save_model(conf, epoch, model, optimizer, is_best=False, is_last=False):\n to_save = {\n 'model': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'epoch': epoch,\n 'config': conf,\n }\n... | import sys
import os
import yaml
import argparse
import torch
from pprint import pprint
from torch import nn
from torch.utils.data import DataLoader
from utils.utils import save_model, Struct, set_seed, Wandb_Writer
from datasets.datasets import build_HDF5_feat_dataset
from architecture.transformer import TransformWrapper, AttnMIL
from architecture.transMIL import TransMIL
from engine import train_one_epoch, evaluate
from architecture.dsmil import MILNet, FCLayer, BClassifier
from architecture.bmil import probabilistic_MIL_Bayes_spvis
from architecture.clam import CLAM_SB, CLAM_MB
from modules import mean_max
| 10,059 |
# !/usr/bin/env python
os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def get_arguments():
parser = argparse.ArgumentParser('Patch classification training', add_help=False)
parser.add_argument('--config', dest='config', default='config/camelyon_medical_ssl_config.yml',
help='settings of Tip-Adapter in yaml format')
parser.add_argument(
"--eval-only", action="store_true", help="evaluation only"
)
parser.add_argument(
"--seed", type=int, default=1, help="set the random seed to ensure reproducibility"
)
parser.add_argument('--wandb_mode', default='disabled', choices=['offline', 'online', 'disabled'],
help='the model of wandb')
parser.add_argument(
"--n_shot", type=int, default=-1, help="number of wsi images"
)
parser.add_argument(
"--w_loss", type=float, default=1.0, help="number of query token"
)
parser.add_argument(
"--arch", type=str, default='transmil', choices=['transmil', 'clam_sb', 'clam_mb', 'attnmil',
'selfattn', 'dsmil', 'bmil_spvis', 'meanmil', 'maxmil'], help="number of query token"
)
parser.add_argument(
"--n_token", type=int, default=1, help="number of query token"
)
parser.add_argument(
"--n_masked_patch", type=int, default=0, help="whether use adversarial mask"
)
args = parser.parse_args()
return args
def main():
# Load config file
args = get_arguments()
# get config
with open(args.config, "r") as ymlfile:
c = yaml.load(ymlfile, Loader=yaml.FullLoader)
c.update(vars(args))
conf = Struct(**c)
group_name = 'ds_%s_%s_arch_%s_%sepochs' % (conf.dataset, conf.pretrain, conf.arch, conf.train_epoch)
log_writer = Wandb_Writer(group_name=group_name, mode=args.wandb_mode, name=args.seed)
conf.ckpt_dir = log_writer.wandb.dir[:-5] + 'saved_models'
if conf.wandb_mode == 'disabled':
conf.ckpt_dir = os.path.join(conf.ckpt_dir, group_name, str(args.seed))
os.makedirs(conf.ckpt_dir, exist_ok=True)
print("Used config:");
pprint(vars(conf));
# Prepare dataset
set_seed(args.seed)
# define datasets and dataloaders
train_data, val_data, test_data = build_HDF5_feat_dataset(os.path.join(conf.data_dir, 'patch_feats_pretrain_%s.h5'%conf.pretrain), conf)
train_loader = DataLoader(train_data, batch_size=conf.B, shuffle=True,
num_workers=conf.n_worker, pin_memory=conf.pin_memory, drop_last=True)
val_loader = DataLoader(val_data, batch_size=conf.B, shuffle=False,
num_workers=conf.n_worker, pin_memory=conf.pin_memory, drop_last=False)
test_loader = DataLoader(test_data, batch_size=conf.B, shuffle=False,
num_workers=conf.n_worker, pin_memory=conf.pin_memory, drop_last=False)
# define network
if conf.arch == 'transmil':
net = TransMIL(conf)
elif conf.arch == 'selfattn':
net = TransformWrapper(conf)
elif conf.arch == 'clam_sb':
net = CLAM_SB(conf).to(device)
elif conf.arch == 'clam_mb':
|
# !/usr/bin/env python
os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def get_arguments():
parser = argparse.ArgumentParser('Patch classification training', add_help=False)
parser.add_argument('--config', dest='config', default='config/camelyon_medical_ssl_config.yml',
help='settings of Tip-Adapter in yaml format')
parser.add_argument(
"--eval-only", action="store_true", help="evaluation only"
)
parser.add_argument(
"--seed", type=int, default=1, help="set the random seed to ensure reproducibility"
)
parser.add_argument('--wandb_mode', default='disabled', choices=['offline', 'online', 'disabled'],
help='the model of wandb')
parser.add_argument(
"--n_shot", type=int, default=-1, help="number of wsi images"
)
parser.add_argument(
"--w_loss", type=float, default=1.0, help="number of query token"
)
parser.add_argument(
"--arch", type=str, default='transmil', choices=['transmil', 'clam_sb', 'clam_mb', 'attnmil',
'selfattn', 'dsmil', 'bmil_spvis', 'meanmil', 'maxmil'], help="number of query token"
)
parser.add_argument(
"--n_token", type=int, default=1, help="number of query token"
)
parser.add_argument(
"--n_masked_patch", type=int, default=0, help="whether use adversarial mask"
)
args = parser.parse_args()
return args
def main():
# Load config file
args = get_arguments()
# get config
with open(args.config, "r") as ymlfile:
c = yaml.load(ymlfile, Loader=yaml.FullLoader)
c.update(vars(args))
conf = Struct(**c)
group_name = 'ds_%s_%s_arch_%s_%sepochs' % (conf.dataset, conf.pretrain, conf.arch, conf.train_epoch)
log_writer = Wandb_Writer(group_name=group_name, mode=args.wandb_mode, name=args.seed)
conf.ckpt_dir = log_writer.wandb.dir[:-5] + 'saved_models'
if conf.wandb_mode == 'disabled':
conf.ckpt_dir = os.path.join(conf.ckpt_dir, group_name, str(args.seed))
os.makedirs(conf.ckpt_dir, exist_ok=True)
print("Used config:");
pprint(vars(conf));
# Prepare dataset
set_seed(args.seed)
# define datasets and dataloaders
train_data, val_data, test_data = build_HDF5_feat_dataset(os.path.join(conf.data_dir, 'patch_feats_pretrain_%s.h5'%conf.pretrain), conf)
train_loader = DataLoader(train_data, batch_size=conf.B, shuffle=True,
num_workers=conf.n_worker, pin_memory=conf.pin_memory, drop_last=True)
val_loader = DataLoader(val_data, batch_size=conf.B, shuffle=False,
num_workers=conf.n_worker, pin_memory=conf.pin_memory, drop_last=False)
test_loader = DataLoader(test_data, batch_size=conf.B, shuffle=False,
num_workers=conf.n_worker, pin_memory=conf.pin_memory, drop_last=False)
# define network
if conf.arch == 'transmil':
net = TransMIL(conf)
elif conf.arch == 'selfattn':
net = TransformWrapper(conf)
elif conf.arch == 'clam_sb':
net = CLAM_SB(conf).to(device)
elif conf.arch == 'clam_mb':
| net = CLAM_MB(conf).to(device)
| 15 | 2023-11-12 14:07:34+00:00 | 12k |
zhang-tao-whu/DVIS_Plus | dvis_Plus/data_video/ytvis_eval.py | [
{
"identifier": "YTVOS",
"path": "dvis_Plus/data_video/datasets/ytvis_api/ytvos.py",
"snippet": "class YTVOS:\n def __init__(self, annotation_file=None):\n \"\"\"\n Constructor of Microsoft COCO helper class for reading and visualizing annotations.\n :param annotation_file (str):... | import contextlib
import copy
import io
import itertools
import json
import logging
import numpy as np
import os
import pycocotools.mask as mask_util
import torch
import detectron2.utils.comm as comm
from collections import OrderedDict
from .datasets.ytvis_api.ytvos import YTVOS
from .datasets.ytvis_api.ytvoseval import YTVOSeval
from tabulate import tabulate
from detectron2.config import CfgNode
from detectron2.data import MetadataCatalog
from detectron2.evaluation import DatasetEvaluator
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import create_small_table | 10,681 | # Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/sukjunhwang/IFC
class YTVISEvaluator(DatasetEvaluator):
"""
Evaluate AR for object proposals, AP for instance detection/segmentation, AP
for keypoint detection outputs using COCO's metrics.
See http://cocodataset.org/#detection-eval and
http://cocodataset.org/#keypoints-eval to understand its metrics.
In addition to COCO, this evaluator is able to support any bounding box detection,
instance segmentation, or keypoint detection dataset.
"""
def __init__(
self,
dataset_name,
tasks=None,
distributed=True,
output_dir=None,
*,
use_fast_impl=True,
):
"""
Args:
dataset_name (str): name of the dataset to be evaluated.
It must have either the following corresponding metadata:
"json_file": the path to the COCO format annotation
Or it must be in detectron2's standard dataset format
so it can be converted to COCO format automatically.
tasks (tuple[str]): tasks that can be evaluated under the given
configuration. A task is one of "bbox", "segm", "keypoints".
By default, will infer this automatically from predictions.
distributed (True): if True, will collect results from all ranks and run evaluation
in the main process.
Otherwise, will only evaluate the results in the current process.
output_dir (str): optional, an output directory to dump all
results predicted on the dataset. The dump contains two files:
1. "instances_predictions.pth" a file in torch serialization
format that contains all the raw original predictions.
2. "coco_instances_results.json" a json file in COCO's result
format.
use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP.
Although the results should be very close to the official implementation in COCO
API, it is still recommended to compute results with the official API for use in
papers. The faster implementation also uses more RAM.
"""
self._logger = logging.getLogger(__name__)
self._distributed = distributed
self._output_dir = output_dir
self._use_fast_impl = use_fast_impl
if tasks is not None and isinstance(tasks, CfgNode):
self._logger.warning(
"COCO Evaluator instantiated using config, this is deprecated behavior."
" Please pass in explicit arguments instead."
)
self._tasks = None # Infering it from predictions should be better
else:
self._tasks = tasks
self._cpu_device = torch.device("cpu")
self._metadata = MetadataCatalog.get(dataset_name)
json_file = PathManager.get_local_path(self._metadata.json_file)
with contextlib.redirect_stdout(io.StringIO()):
| # Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/sukjunhwang/IFC
class YTVISEvaluator(DatasetEvaluator):
"""
Evaluate AR for object proposals, AP for instance detection/segmentation, AP
for keypoint detection outputs using COCO's metrics.
See http://cocodataset.org/#detection-eval and
http://cocodataset.org/#keypoints-eval to understand its metrics.
In addition to COCO, this evaluator is able to support any bounding box detection,
instance segmentation, or keypoint detection dataset.
"""
def __init__(
self,
dataset_name,
tasks=None,
distributed=True,
output_dir=None,
*,
use_fast_impl=True,
):
"""
Args:
dataset_name (str): name of the dataset to be evaluated.
It must have either the following corresponding metadata:
"json_file": the path to the COCO format annotation
Or it must be in detectron2's standard dataset format
so it can be converted to COCO format automatically.
tasks (tuple[str]): tasks that can be evaluated under the given
configuration. A task is one of "bbox", "segm", "keypoints".
By default, will infer this automatically from predictions.
distributed (True): if True, will collect results from all ranks and run evaluation
in the main process.
Otherwise, will only evaluate the results in the current process.
output_dir (str): optional, an output directory to dump all
results predicted on the dataset. The dump contains two files:
1. "instances_predictions.pth" a file in torch serialization
format that contains all the raw original predictions.
2. "coco_instances_results.json" a json file in COCO's result
format.
use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP.
Although the results should be very close to the official implementation in COCO
API, it is still recommended to compute results with the official API for use in
papers. The faster implementation also uses more RAM.
"""
self._logger = logging.getLogger(__name__)
self._distributed = distributed
self._output_dir = output_dir
self._use_fast_impl = use_fast_impl
if tasks is not None and isinstance(tasks, CfgNode):
self._logger.warning(
"COCO Evaluator instantiated using config, this is deprecated behavior."
" Please pass in explicit arguments instead."
)
self._tasks = None # Infering it from predictions should be better
else:
self._tasks = tasks
self._cpu_device = torch.device("cpu")
self._metadata = MetadataCatalog.get(dataset_name)
json_file = PathManager.get_local_path(self._metadata.json_file)
with contextlib.redirect_stdout(io.StringIO()): | self._ytvis_api = YTVOS(json_file) | 0 | 2023-11-14 10:55:11+00:00 | 12k |
ej0cl6/TextEE | TextEE/models/AMRIE/E2Emodel.py | [
{
"identifier": "Graph",
"path": "TextEE/models/AMRIE/graph.py",
"snippet": "class Graph(object):\n def __init__(self, entities, triggers, relations, roles, vocabs, mentions=None):\n \"\"\"\n :param entities (list): A list of entities represented as a tuple of\n (start_offset, en... | import torch
import torch.nn as nn
import dgl
import numpy as np
from .graph import Graph
from transformers import BertModel, RobertaModel, XLMRobertaModel, AutoModel, BertConfig, RobertaConfig, XLMRobertaConfig
from .global_feature import generate_global_feature_vector, generate_global_feature_maps
from .util import normalize_score
from .gnn import FinalGNN | 9,709 |
def calc_conf_score_(self, logits, labels):
batch_size, _, _ = logits.size()
logits_t = logits.transpose(1, 0)
scores = [[] for _ in range(batch_size)]
pre_labels = [self.start] * batch_size
for i, logit in enumerate(logits_t):
logit_exp = logit.unsqueeze(-1).expand(batch_size,
self.label_size,
self.label_size)
trans_exp = self.transition.unsqueeze(0).expand(batch_size,
self.label_size,
self.label_size)
score = logit_exp + trans_exp
score = score.view(-1, self.label_size * self.label_size) \
.softmax(1)
for j in range(batch_size):
cur_label = labels[j][i]
cur_score = score[j][cur_label * self.label_size + pre_labels[j]]
scores[j].append(cur_score)
pre_labels[j] = cur_label
return scores
class AMRIEE2EModel(nn.Module):
def __init__(self,
config,
vocabs,
valid_patterns=None):
super().__init__()
self.if_local = 0
# vocabularies
self.vocabs = vocabs
self.entity_label_stoi = vocabs['entity_label']
self.trigger_label_stoi = vocabs['trigger_label']
self.mention_type_stoi = vocabs['mention_type']
self.entity_type_stoi = vocabs['entity_type']
self.event_type_stoi = vocabs['event_type']
self.relation_type_stoi = vocabs['relation_type']
self.role_type_stoi = vocabs['role_type']
self.entity_label_itos = {i:s for s, i in self.entity_label_stoi.items()}
self.trigger_label_itos = {i:s for s, i in self.trigger_label_stoi.items()}
self.entity_type_itos = {i: s for s, i in self.entity_type_stoi.items()}
self.event_type_itos = {i: s for s, i in self.event_type_stoi.items()}
self.relation_type_itos = {i: s for s, i in self.relation_type_stoi.items()}
self.role_type_itos = {i: s for s, i in self.role_type_stoi.items()}
self.entity_label_num = len(self.entity_label_stoi)
self.trigger_label_num = len(self.trigger_label_stoi)
self.mention_type_num = len(self.mention_type_stoi)
self.entity_type_num = len(self.entity_type_stoi)
self.event_type_num = len(self.event_type_stoi)
self.relation_type_num = len(self.relation_type_stoi)
self.role_type_num = len(self.role_type_stoi)
self.valid_relation_entity = set()
self.valid_event_role = set()
self.valid_role_entity = set()
if valid_patterns:
self.valid_event_role = valid_patterns['event_role']
self.valid_relation_entity = valid_patterns['relation_entity']
self.valid_role_entity = valid_patterns['role_entity']
self.relation_directional = config.relation_directional
self.symmetric_relations = config.symmetric_relations
self.symmetric_relation_idxs = {self.relation_type_stoi[r]
for r in self.symmetric_relations}
# BERT encoder
self.pretrained_model_name = config.pretrained_model_name
self.cache_dir = config.cache_dir
if self.pretrained_model_name.startswith('bert-'):
self.bert = BertModel.from_pretrained(self.pretrained_model_name,
cache_dir=self.cache_dir,
output_hidden_states=True)
self.bert_config = BertConfig.from_pretrained(self.pretrained_model_name,
cache_dir=self.cache_dir)
elif self.pretrained_model_name.startswith('roberta-'):
self.bert = RobertaModel.from_pretrained(self.pretrained_model_name,
cache_dir=self.cache_dir,
output_hidden_states=True)
self.bert_config = RobertaConfig.from_pretrained(self.pretrained_model_name,
cache_dir=self.cache_dir)
elif self.pretrained_model_name.startswith('xlm-'):
self.bert = XLMRobertaModel.from_pretrained(self.pretrained_model_name,
cache_dir=self.cache_dir,
output_hidden_states=True)
self.bert_config = XLMRobertaConfig.from_pretrained(self.pretrained_model_name,
cache_dir=self.cache_dir)
else:
raise ValueError
self.bert_dim = self.bert_config.hidden_size
self.extra_bert = config.extra_bert
self.use_extra_bert = config.use_extra_bert
if self.use_extra_bert:
self.bert_dim *= 2
# print(self.use_extra_bert)
# print(bert_config)
# self.bert = BertModel(bert_config)
self.bert_dropout = nn.Dropout(p=config.bert_dropout)
self.multi_piece = config.multi_piece_strategy
# local classifiers
self.use_entity_type = config.use_entity_type
self.binary_dim = self.bert_dim * 2
linear_bias = config.linear_bias
linear_dropout = config.linear_dropout
entity_hidden_num = config.entity_hidden_num
mention_hidden_num = config.mention_hidden_num
event_hidden_num = config.event_hidden_num
relation_hidden_num = config.relation_hidden_num
role_hidden_num = config.role_hidden_num
self.edge_type_num = config.edge_type_num
self.edge_type_dim = config.edge_type_dim
self.use_graph_encoder = config.use_graph_encoder
gnn_layers = config.gnn_layers
self.lamda = config.lamda
role_input_dim = self.binary_dim + (self.entity_type_num if self.use_entity_type else 0)
self.device = config.gpu_device
# print(self.bert_dim)
if self.use_graph_encoder:
if not self.if_local:
|
def log_sum_exp(tensor, dim=0, keepdim: bool = False):
"""LogSumExp operation used by CRF."""
m, _ = tensor.max(dim, keepdim=keepdim)
if keepdim:
stable_vec = tensor - m
else:
stable_vec = tensor - m.unsqueeze(dim)
return m + (stable_vec.exp().sum(dim, keepdim=keepdim)).log()
def sequence_mask(lens, max_len=None):
"""Generate a sequence mask tensor from sequence lengths, used by CRF."""
batch_size = lens.size(0)
if max_len is None:
max_len = lens.max().item()
ranges = torch.arange(0, max_len, device=lens.device).long()
ranges = ranges.unsqueeze(0).expand(batch_size, max_len)
lens_exp = lens.unsqueeze(1).expand_as(ranges)
mask = ranges < lens_exp
return mask
def token_lens_to_offsets(token_lens):
"""Map token lengths to first word piece indices, used by the sentence
encoder.
:param token_lens (list): token lengths (word piece numbers)
:return (list): first word piece indices (offsets)
"""
max_token_num = max([len(x) for x in token_lens])
offsets = []
for seq_token_lens in token_lens:
seq_offsets = [0]
for l in seq_token_lens[:-1]:
seq_offsets.append(seq_offsets[-1] + l)
offsets.append(seq_offsets + [-1] * (max_token_num - len(seq_offsets)))
return offsets
def token_lens_to_idxs(token_lens):
"""Map token lengths to a word piece index matrix (for torch.gather) and a
mask tensor.
For example (only show a sequence instead of a batch):
token lengths: [1,1,1,3,1]
=>
indices: [[0,0,0], [1,0,0], [2,0,0], [3,4,5], [6,0,0]]
masks: [[1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 0.0, 0.0],
[0.33, 0.33, 0.33], [1.0, 0.0, 0.0]]
Next, we use torch.gather() to select vectors of word pieces for each token,
and average them as follows (incomplete code):
:param token_lens (list): token lengths.
:return: a index matrix and a mask tensor.
"""
max_token_num = max([len(x) for x in token_lens])
max_token_len = max([max(x) for x in token_lens])
idxs, masks = [], []
for seq_token_lens in token_lens:
seq_idxs, seq_masks = [], []
offset = 0
for token_len in seq_token_lens:
seq_idxs.extend([i + offset for i in range(token_len)]
+ [-1] * (max_token_len - token_len))
seq_masks.extend([1.0 / token_len] * token_len
+ [0.0] * (max_token_len - token_len))
offset += token_len
seq_idxs.extend([-1] * max_token_len * (max_token_num - len(seq_token_lens)))
seq_masks.extend([0.0] * max_token_len * (max_token_num - len(seq_token_lens)))
idxs.append(seq_idxs)
masks.append(seq_masks)
return idxs, masks, max_token_num, max_token_len
def graphs_to_node_idxs(graphs):
"""
:param graphs (list): A list of Graph objects.
:return: entity/trigger index matrix, mask tensor, max number, and max length
"""
entity_idxs, entity_masks = [], []
trigger_idxs, trigger_masks = [], []
max_entity_num = max(max(graph.entity_num for graph in graphs), 1)
max_trigger_num = max(max(graph.trigger_num for graph in graphs), 1)
max_entity_len = max(max([e[1] - e[0] for e in graph.entities] + [1])
for graph in graphs)
max_trigger_len = max(max([t[1] - t[0] for t in graph.triggers] + [1])
for graph in graphs)
for graph in graphs:
seq_entity_idxs, seq_entity_masks = [], []
seq_trigger_idxs, seq_trigger_masks = [], []
for entity in graph.entities:
entity_len = entity[1] - entity[0]
seq_entity_idxs.extend([i for i in range(entity[0], entity[1])])
seq_entity_idxs.extend([0] * (max_entity_len - entity_len))
seq_entity_masks.extend([1.0 / entity_len] * entity_len)
seq_entity_masks.extend([0.0] * (max_entity_len - entity_len))
seq_entity_idxs.extend([0] * max_entity_len * (max_entity_num - graph.entity_num))
seq_entity_masks.extend([0.0] * max_entity_len * (max_entity_num - graph.entity_num))
entity_idxs.append(seq_entity_idxs)
entity_masks.append(seq_entity_masks)
for trigger in graph.triggers:
trigger_len = trigger[1] - trigger[0]
seq_trigger_idxs.extend([i for i in range(trigger[0], trigger[1])])
seq_trigger_idxs.extend([0] * (max_trigger_len - trigger_len))
seq_trigger_masks.extend([1.0 / trigger_len] * trigger_len)
seq_trigger_masks.extend([0.0] * (max_trigger_len - trigger_len))
seq_trigger_idxs.extend([0] * max_trigger_len * (max_trigger_num - graph.trigger_num))
seq_trigger_masks.extend([0.0] * max_trigger_len * (max_trigger_num - graph.trigger_num))
trigger_idxs.append(seq_trigger_idxs)
trigger_masks.append(seq_trigger_masks)
return (
entity_idxs, entity_masks, max_entity_num, max_entity_len,
trigger_idxs, trigger_masks, max_trigger_num, max_trigger_len,
)
def graphs_to_label_idxs(graphs, max_entity_num=-1, max_trigger_num=-1,
relation_directional=False,
symmetric_relation_idxs=None):
"""Convert a list of graphs to label index and mask matrices
:param graphs (list): A list of Graph objects.
:param max_entity_num (int) Max entity number (default = -1).
:param max_trigger_num (int) Max trigger number (default = -1).
"""
if max_entity_num == -1:
max_entity_num = max(max([g.entity_num for g in graphs]), 1)
if max_trigger_num == -1:
max_trigger_num = max(max([g.trigger_num for g in graphs]), 1)
(
batch_entity_idxs, batch_entity_mask,
batch_trigger_idxs, batch_trigger_mask,
batch_relation_idxs, batch_relation_mask,
batch_role_idxs, batch_role_mask
) = [[] for _ in range(8)]
for graph in graphs:
(
entity_idxs, entity_mask, trigger_idxs, trigger_mask,
relation_idxs, relation_mask, role_idxs, role_mask,
) = graph.to_label_idxs(max_entity_num, max_trigger_num,
relation_directional=relation_directional,
symmetric_relation_idxs=symmetric_relation_idxs)
batch_entity_idxs.append(entity_idxs)
batch_entity_mask.append(entity_mask)
batch_trigger_idxs.append(trigger_idxs)
batch_trigger_mask.append(trigger_mask)
batch_relation_idxs.append(relation_idxs)
batch_relation_mask.append(relation_mask)
batch_role_idxs.append(role_idxs)
batch_role_mask.append(role_mask)
return (
batch_entity_idxs, batch_entity_mask,
batch_trigger_idxs, batch_trigger_mask,
batch_relation_idxs, batch_relation_mask,
batch_role_idxs, batch_role_mask
)
def generate_pairwise_idxs(num1, num2):
"""Generate all pairwise combinations among entity mentions (relation) or
event triggers and entity mentions (argument role).
For example, if there are 2 triggers and 3 mentions in a sentence, num1 = 2,
and num2 = 3. We generate the following vector:
idxs = [0, 2, 0, 3, 0, 4, 1, 2, 1, 3, 1, 4]
Suppose `trigger_reprs` and `entity_reprs` are trigger/entity representation
tensors. We concatenate them using:
te_reprs = torch.cat([entity_reprs, entity_reprs], dim=1)
After that we select vectors from `te_reprs` using (incomplete code) to obtain
pairwise combinations of all trigger and entity vectors.
te_reprs = torch.gather(te_reprs, 1, idxs)
te_reprs = te_reprs.view(batch_size, -1, 2 * bert_dim)
:param num1: trigger number (argument role) or entity number (relation)
:param num2: entity number (relation)
:return (list): a list of indices
"""
idxs = []
for i in range(num1):
for j in range(num2):
idxs.append(i)
idxs.append(j + num1)
return idxs
def tag_paths_to_spans(paths, token_nums, vocab):
"""Convert predicted tag paths to a list of spans (entity mentions or event
triggers).
:param paths: predicted tag paths.
:return (list): a list (batch) of lists (sequence) of spans.
"""
batch_mentions = []
itos = {i: s for s, i in vocab.items()}
for i, path in enumerate(paths):
mentions = []
cur_mention = None
path = path.tolist()[:token_nums[i].item()]
for j, tag in enumerate(path):
tag = itos[tag]
if tag == 'O':
prefix = tag = 'O'
else:
prefix, tag = tag.split('-', 1)
if prefix == 'B':
if cur_mention:
mentions.append(cur_mention)
cur_mention = [j, j + 1, tag]
elif prefix == 'I':
if cur_mention is None:
# treat it as B-*
cur_mention = [j, j + 1, tag]
elif cur_mention[-1] == tag:
cur_mention[1] = j + 1
else:
# treat it as B-*
mentions.append(cur_mention)
cur_mention = [j, j + 1, tag]
else:
if cur_mention:
mentions.append(cur_mention)
cur_mention = None
if cur_mention:
mentions.append(cur_mention)
batch_mentions.append(mentions)
return batch_mentions
class Linears(nn.Module):
"""Multiple linear layers with Dropout."""
def __init__(self, dimensions, activation='relu', dropout_prob=0.0, bias=True):
super().__init__()
assert len(dimensions) > 1
self.layers = nn.ModuleList([nn.Linear(dimensions[i], dimensions[i + 1], bias=bias)
for i in range(len(dimensions) - 1)])
self.activation = getattr(torch, activation)
self.dropout = nn.Dropout(dropout_prob)
def forward(self, inputs):
for i, layer in enumerate(self.layers):
if i > 0:
inputs = self.activation(inputs)
inputs = self.dropout(inputs)
inputs = layer(inputs)
return inputs
class CRF(nn.Module):
def __init__(self, label_vocab, bioes=False):
super(CRF, self).__init__()
self.label_vocab = label_vocab
self.label_size = len(label_vocab) + 2
# self.same_type = self.map_same_types()
self.bioes = bioes
self.start = self.label_size - 2
self.end = self.label_size - 1
transition = torch.randn(self.label_size, self.label_size)
self.transition = nn.Parameter(transition)
self.initialize()
def initialize(self):
self.transition.data[:, self.end] = -100.0
self.transition.data[self.start, :] = -100.0
for label, label_idx in self.label_vocab.items():
if label.startswith('I-') or label.startswith('E-'):
self.transition.data[label_idx, self.start] = -100.0
if label.startswith('B-') or label.startswith('I-'):
self.transition.data[self.end, label_idx] = -100.0
for label_from, label_from_idx in self.label_vocab.items():
if label_from == 'O':
label_from_prefix, label_from_type = 'O', 'O'
else:
label_from_prefix, label_from_type = label_from.split('-', 1)
for label_to, label_to_idx in self.label_vocab.items():
if label_to == 'O':
label_to_prefix, label_to_type = 'O', 'O'
else:
label_to_prefix, label_to_type = label_to.split('-', 1)
if self.bioes:
is_allowed = any(
[
label_from_prefix in ['O', 'E', 'S']
and label_to_prefix in ['O', 'B', 'S'],
label_from_prefix in ['B', 'I']
and label_to_prefix in ['I', 'E']
and label_from_type == label_to_type
]
)
else:
is_allowed = any(
[
label_to_prefix in ['B', 'O'],
label_from_prefix in ['B', 'I']
and label_to_prefix == 'I'
and label_from_type == label_to_type
]
)
if not is_allowed:
self.transition.data[
label_to_idx, label_from_idx] = -100.0
def pad_logits(self, logits):
"""Pad the linear layer output with <SOS> and <EOS> scores.
:param logits: Linear layer output (no non-linear function).
"""
batch_size, seq_len, _ = logits.size()
pads = logits.new_full((batch_size, seq_len, 2), -100.0,
requires_grad=False)
logits = torch.cat([logits, pads], dim=2)
return logits
def calc_binary_score(self, labels, lens):
batch_size, seq_len = labels.size()
# A tensor of size batch_size * (seq_len + 2)
labels_ext = labels.new_empty((batch_size, seq_len + 2))
labels_ext[:, 0] = self.start
labels_ext[:, 1:-1] = labels
mask = sequence_mask(lens + 1, max_len=(seq_len + 2)).long()
pad_stop = labels.new_full((1,), self.end, requires_grad=False)
pad_stop = pad_stop.unsqueeze(-1).expand(batch_size, seq_len + 2)
labels_ext = (1 - mask) * pad_stop + mask * labels_ext
labels = labels_ext
trn = self.transition
trn_exp = trn.unsqueeze(0).expand(batch_size, self.label_size,
self.label_size)
lbl_r = labels[:, 1:]
lbl_rexp = lbl_r.unsqueeze(-1).expand(*lbl_r.size(), self.label_size)
# score of jumping to a tag
trn_row = torch.gather(trn_exp, 1, lbl_rexp)
lbl_lexp = labels[:, :-1].unsqueeze(-1)
trn_scr = torch.gather(trn_row, 2, lbl_lexp)
trn_scr = trn_scr.squeeze(-1)
mask = sequence_mask(lens + 1).float()
trn_scr = trn_scr * mask
score = trn_scr
return score
def calc_unary_score(self, logits, labels, lens):
"""Checked"""
labels_exp = labels.unsqueeze(-1)
scores = torch.gather(logits, 2, labels_exp).squeeze(-1)
mask = sequence_mask(lens).float()
scores = scores * mask
return scores
def calc_gold_score(self, logits, labels, lens):
"""Checked"""
unary_score = self.calc_unary_score(logits, labels, lens).sum(
1).squeeze(-1)
binary_score = self.calc_binary_score(labels, lens).sum(1).squeeze(-1)
return unary_score + binary_score
def calc_norm_score(self, logits, lens):
batch_size, _, _ = logits.size()
alpha = logits.new_full((batch_size, self.label_size), -100.0)
alpha[:, self.start] = 0
lens_ = lens.clone()
logits_t = logits.transpose(1, 0)
for logit in logits_t:
logit_exp = logit.unsqueeze(-1).expand(batch_size,
self.label_size,
self.label_size)
alpha_exp = alpha.unsqueeze(1).expand(batch_size,
self.label_size,
self.label_size)
trans_exp = self.transition.unsqueeze(0).expand_as(alpha_exp)
mat = logit_exp + alpha_exp + trans_exp
alpha_nxt = log_sum_exp(mat, 2).squeeze(-1)
mask = (lens_ > 0).float().unsqueeze(-1).expand_as(alpha)
alpha = mask * alpha_nxt + (1 - mask) * alpha
lens_ = lens_ - 1
alpha = alpha + self.transition[self.end].unsqueeze(0).expand_as(alpha)
norm = log_sum_exp(alpha, 1).squeeze(-1)
return norm
def loglik(self, logits, labels, lens):
norm_score = self.calc_norm_score(logits, lens)
gold_score = self.calc_gold_score(logits, labels, lens)
return gold_score - norm_score
def viterbi_decode(self, logits, lens):
"""Borrowed from pytorch tutorial
Arguments:
logits: [batch_size, seq_len, n_labels] FloatTensor
lens: [batch_size] LongTensor
"""
batch_size, _, n_labels = logits.size()
vit = logits.new_full((batch_size, self.label_size), -100.0)
vit[:, self.start] = 0
c_lens = lens.clone()
logits_t = logits.transpose(1, 0)
pointers = []
for logit in logits_t:
vit_exp = vit.unsqueeze(1).expand(batch_size, n_labels, n_labels)
trn_exp = self.transition.unsqueeze(0).expand_as(vit_exp)
vit_trn_sum = vit_exp + trn_exp
vt_max, vt_argmax = vit_trn_sum.max(2)
vt_max = vt_max.squeeze(-1)
vit_nxt = vt_max + logit
pointers.append(vt_argmax.squeeze(-1).unsqueeze(0))
mask = (c_lens > 0).float().unsqueeze(-1).expand_as(vit_nxt)
vit = mask * vit_nxt + (1 - mask) * vit
mask = (c_lens == 1).float().unsqueeze(-1).expand_as(vit_nxt)
vit += mask * self.transition[self.end].unsqueeze(
0).expand_as(vit_nxt)
c_lens = c_lens - 1
pointers = torch.cat(pointers)
scores, idx = vit.max(1)
paths = [idx.unsqueeze(1)]
for argmax in reversed(pointers):
idx_exp = idx.unsqueeze(-1)
idx = torch.gather(argmax, 1, idx_exp)
idx = idx.squeeze(-1)
paths.insert(0, idx.unsqueeze(1))
paths = torch.cat(paths[1:], 1)
scores = scores.squeeze(-1)
return scores, paths
def calc_conf_score_(self, logits, labels):
batch_size, _, _ = logits.size()
logits_t = logits.transpose(1, 0)
scores = [[] for _ in range(batch_size)]
pre_labels = [self.start] * batch_size
for i, logit in enumerate(logits_t):
logit_exp = logit.unsqueeze(-1).expand(batch_size,
self.label_size,
self.label_size)
trans_exp = self.transition.unsqueeze(0).expand(batch_size,
self.label_size,
self.label_size)
score = logit_exp + trans_exp
score = score.view(-1, self.label_size * self.label_size) \
.softmax(1)
for j in range(batch_size):
cur_label = labels[j][i]
cur_score = score[j][cur_label * self.label_size + pre_labels[j]]
scores[j].append(cur_score)
pre_labels[j] = cur_label
return scores
class AMRIEE2EModel(nn.Module):
def __init__(self,
config,
vocabs,
valid_patterns=None):
super().__init__()
self.if_local = 0
# vocabularies
self.vocabs = vocabs
self.entity_label_stoi = vocabs['entity_label']
self.trigger_label_stoi = vocabs['trigger_label']
self.mention_type_stoi = vocabs['mention_type']
self.entity_type_stoi = vocabs['entity_type']
self.event_type_stoi = vocabs['event_type']
self.relation_type_stoi = vocabs['relation_type']
self.role_type_stoi = vocabs['role_type']
self.entity_label_itos = {i:s for s, i in self.entity_label_stoi.items()}
self.trigger_label_itos = {i:s for s, i in self.trigger_label_stoi.items()}
self.entity_type_itos = {i: s for s, i in self.entity_type_stoi.items()}
self.event_type_itos = {i: s for s, i in self.event_type_stoi.items()}
self.relation_type_itos = {i: s for s, i in self.relation_type_stoi.items()}
self.role_type_itos = {i: s for s, i in self.role_type_stoi.items()}
self.entity_label_num = len(self.entity_label_stoi)
self.trigger_label_num = len(self.trigger_label_stoi)
self.mention_type_num = len(self.mention_type_stoi)
self.entity_type_num = len(self.entity_type_stoi)
self.event_type_num = len(self.event_type_stoi)
self.relation_type_num = len(self.relation_type_stoi)
self.role_type_num = len(self.role_type_stoi)
self.valid_relation_entity = set()
self.valid_event_role = set()
self.valid_role_entity = set()
if valid_patterns:
self.valid_event_role = valid_patterns['event_role']
self.valid_relation_entity = valid_patterns['relation_entity']
self.valid_role_entity = valid_patterns['role_entity']
self.relation_directional = config.relation_directional
self.symmetric_relations = config.symmetric_relations
self.symmetric_relation_idxs = {self.relation_type_stoi[r]
for r in self.symmetric_relations}
# BERT encoder
self.pretrained_model_name = config.pretrained_model_name
self.cache_dir = config.cache_dir
if self.pretrained_model_name.startswith('bert-'):
self.bert = BertModel.from_pretrained(self.pretrained_model_name,
cache_dir=self.cache_dir,
output_hidden_states=True)
self.bert_config = BertConfig.from_pretrained(self.pretrained_model_name,
cache_dir=self.cache_dir)
elif self.pretrained_model_name.startswith('roberta-'):
self.bert = RobertaModel.from_pretrained(self.pretrained_model_name,
cache_dir=self.cache_dir,
output_hidden_states=True)
self.bert_config = RobertaConfig.from_pretrained(self.pretrained_model_name,
cache_dir=self.cache_dir)
elif self.pretrained_model_name.startswith('xlm-'):
self.bert = XLMRobertaModel.from_pretrained(self.pretrained_model_name,
cache_dir=self.cache_dir,
output_hidden_states=True)
self.bert_config = XLMRobertaConfig.from_pretrained(self.pretrained_model_name,
cache_dir=self.cache_dir)
else:
raise ValueError
self.bert_dim = self.bert_config.hidden_size
self.extra_bert = config.extra_bert
self.use_extra_bert = config.use_extra_bert
if self.use_extra_bert:
self.bert_dim *= 2
# print(self.use_extra_bert)
# print(bert_config)
# self.bert = BertModel(bert_config)
self.bert_dropout = nn.Dropout(p=config.bert_dropout)
self.multi_piece = config.multi_piece_strategy
# local classifiers
self.use_entity_type = config.use_entity_type
self.binary_dim = self.bert_dim * 2
linear_bias = config.linear_bias
linear_dropout = config.linear_dropout
entity_hidden_num = config.entity_hidden_num
mention_hidden_num = config.mention_hidden_num
event_hidden_num = config.event_hidden_num
relation_hidden_num = config.relation_hidden_num
role_hidden_num = config.role_hidden_num
self.edge_type_num = config.edge_type_num
self.edge_type_dim = config.edge_type_dim
self.use_graph_encoder = config.use_graph_encoder
gnn_layers = config.gnn_layers
self.lamda = config.lamda
role_input_dim = self.binary_dim + (self.entity_type_num if self.use_entity_type else 0)
self.device = config.gpu_device
# print(self.bert_dim)
if self.use_graph_encoder:
if not self.if_local: | self.graph_encoder = FinalGNN(self.bert_dim, self.edge_type_dim, self.edge_type_num, gnn_layers, self.lamda, config.gpu_device) | 4 | 2023-11-15 21:32:56+00:00 | 12k |
chaiNNer-org/spandrel | src/spandrel/architectures/GRLIR/arch/grl.py | [
{
"identifier": "to_2tuple",
"path": "src/spandrel/architectures/__arch_helpers/timm/helpers.py",
"snippet": "def to_2tuple(x: T | Iterable[T]) -> tuple[T, T]:\n if isinstance(x, str):\n return x, x # type: ignore\n if isinstance(x, collections.abc.Iterable):\n return tuple(x) # ty... | from typing import Literal
from ...__arch_helpers.timm.helpers import to_2tuple
from ...__arch_helpers.timm.weight_init import trunc_normal_
from .config import GRLConfig
from .mixed_attn_block_efficient import (
EfficientMixAttnTransformerBlock,
get_stripe_info,
)
from .ops import (
bchw_to_blc,
blc_to_bchw,
calculate_mask,
calculate_mask_all,
get_relative_coords_table_all,
get_relative_position_index_simple,
)
from .swin_v1_block import (
build_last_conv,
)
from .upsample import Upsample, UpsampleOneStep
from fairscale.nn import checkpoint_wrapper # type: ignore
import torch
import torch.nn as nn
import torch.nn.functional as F | 8,600 | self.stripe_size = stripe_size
self.stripe_groups = stripe_groups
self.pretrained_window_size = pretrained_window_size
self.pretrained_stripe_size = pretrained_stripe_size
self.anchor_window_down_factor = anchor_window_down_factor
# Head of the network. First convolution.
self.conv_first = nn.Conv2d(in_channels, embed_dim, 3, 1, 1)
# Body of the network
self.norm_start = norm_layer(embed_dim)
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
# stochastic depth decay rule
args = GRLConfig(
out_proj_type=out_proj_type,
local_connection=local_connection,
euclidean_dist=euclidean_dist,
)
for k, v in self.set_table_index_mask(self.input_resolution).items():
self.register_buffer(k, v, persistent=False)
self.layers = nn.ModuleList()
for i in range(len(depths)):
layer = TransformerStage(
dim=embed_dim,
input_resolution=self.input_resolution,
depth=depths[i],
num_heads_window=num_heads_window[i],
num_heads_stripe=num_heads_stripe[i],
window_size=self.window_size,
stripe_size=stripe_size,
stripe_groups=stripe_groups,
stripe_shift=stripe_shift,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qkv_proj_type=qkv_proj_type,
anchor_proj_type=anchor_proj_type,
anchor_one_stage=anchor_one_stage,
anchor_window_down_factor=anchor_window_down_factor,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[
sum(depths[:i]) : sum(
depths[: i + 1]
) # type: ignore
], # no impact on SR results
norm_layer=norm_layer,
pretrained_window_size=pretrained_window_size,
pretrained_stripe_size=pretrained_stripe_size,
conv_type=conv_type,
init_method=init_method,
fairscale_checkpoint=fairscale_checkpoint,
offload_to_cpu=offload_to_cpu,
args=args,
)
self.layers.append(layer)
self.norm_end = norm_layer(embed_dim)
# Tail of the network
self.conv_after_body = build_last_conv(conv_type, embed_dim)
#####################################################################################################
################################ 3, high quality image reconstruction ################################
if self.upsampler == "pixelshuffle":
# for classical SR
self.conv_before_upsample = nn.Sequential(
nn.Conv2d(embed_dim, num_out_feats, 3, 1, 1), nn.LeakyReLU(inplace=True)
)
self.upsample = Upsample(upscale, num_out_feats)
self.conv_last = nn.Conv2d(num_out_feats, out_channels, 3, 1, 1)
elif self.upsampler == "pixelshuffledirect":
# for lightweight SR (to save parameters)
self.upsample = UpsampleOneStep(
upscale,
embed_dim,
out_channels,
)
elif self.upsampler == "nearest+conv":
# for real-world SR (less artifacts)
assert self.upscale == 4, "only support x4 now."
self.conv_before_upsample = nn.Sequential(
nn.Conv2d(embed_dim, num_out_feats, 3, 1, 1), nn.LeakyReLU(inplace=True)
)
self.conv_up1 = nn.Conv2d(num_out_feats, num_out_feats, 3, 1, 1)
self.conv_up2 = nn.Conv2d(num_out_feats, num_out_feats, 3, 1, 1)
self.conv_hr = nn.Conv2d(num_out_feats, num_out_feats, 3, 1, 1)
self.conv_last = nn.Conv2d(num_out_feats, out_channels, 3, 1, 1)
self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
else:
# for image denoising and JPEG compression artifact reduction
self.conv_last = nn.Conv2d(embed_dim, out_channels, 3, 1, 1)
self.apply(self._init_weights)
if init_method in ["l", "w"] or init_method.find("t") >= 0:
for layer in self.layers:
layer._init_weights()
def set_table_index_mask(self, x_size: tuple[int, int]):
"""
Two used cases:
1) At initialization: set the shared buffers.
2) During forward pass: get the new buffers if the resolution of the input changes
"""
# ss - stripe_size, sss - stripe_shift_size
# ss ~= self.stripe_size
# sss ~= self.stripe_size / 2
ss, sss = get_stripe_info(self.stripe_size, self.stripe_groups, True, x_size)
df = self.anchor_window_down_factor
table_w = get_relative_coords_table_all(
self.window_size, self.pretrained_window_size
)
table_sh = get_relative_coords_table_all(ss, self.pretrained_stripe_size, df)
table_sv = get_relative_coords_table_all(
ss[::-1], self.pretrained_stripe_size, df
)
| """
Efficient and Explicit Modelling of Image Hierarchies for Image Restoration
Image restoration transformers with global, regional, and local modelling
A clean version of the.
Shared buffers are used for relative_coords_table, relative_position_index, and attn_mask.
"""
from __future__ import annotations
class TransformerStage(nn.Module):
"""Transformer stage.
Args:
dim (int): Number of input channels.
input_resolution (tuple[int]): Input resolution.
depth (int): Number of blocks.
num_heads_window (list[int]): Number of window attention heads in different layers.
num_heads_stripe (list[int]): Number of stripe attention heads in different layers.
stripe_size (list[int]): Stripe size. Default: [8, 8]
stripe_groups (list[int]): Number of stripe groups. Default: [None, None].
stripe_shift (bool): whether to shift the stripes. This is used as an ablation study.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qkv_proj_type (str): QKV projection type. Default: linear. Choices: linear, separable_conv.
anchor_proj_type (str): Anchor projection type. Default: avgpool. Choices: avgpool, maxpool, conv2d, separable_conv, patchmerging.
anchor_one_stage (bool): Whether to use one operator or multiple progressive operators to reduce feature map resolution. Default: True.
anchor_window_down_factor (int): The downscale factor used to get the anchors.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
pretrained_window_size (list[int]): pretrained window size. This is actually not used. Default: [0, 0].
pretrained_stripe_size (list[int]): pretrained stripe size. This is actually not used. Default: [0, 0].
conv_type: The convolutional block before residual connection.
init_method: initialization method of the weight parameters used to train large scale models.
Choices: n, normal -- Swin V1 init method.
l, layernorm -- Swin V2 init method. Zero the weight and bias in the post layer normalization layer.
r, res_rescale -- EDSR rescale method. Rescale the residual blocks with a scaling factor 0.1
w, weight_rescale -- MSRResNet rescale method. Rescale the weight parameter in residual blocks with a scaling factor 0.1
t, trunc_normal_ -- nn.Linear, trunc_normal; nn.Conv2d, weight_rescale
fairscale_checkpoint (bool): Whether to use fairscale checkpoint.
offload_to_cpu (bool): used by fairscale_checkpoint
args:
out_proj_type (str): Type of the output projection in the self-attention modules. Default: linear. Choices: linear, conv2d.
local_connection (bool): Whether to enable the local modelling module (two convs followed by Channel attention). For GRL base model, this is used. "local_connection": local_connection,
euclidean_dist (bool): use Euclidean distance or inner product as the similarity metric. An ablation study.
"""
def __init__(
self,
dim: int,
input_resolution: tuple[int, int],
depth: int,
num_heads_window: int,
num_heads_stripe: int,
window_size: tuple[int, int],
stripe_size,
stripe_groups,
stripe_shift,
mlp_ratio=4.0,
qkv_bias=True,
qkv_proj_type="linear",
anchor_proj_type="avgpool",
anchor_one_stage=True,
anchor_window_down_factor=1,
drop=0.0,
attn_drop=0.0,
drop_path=0.0,
norm_layer=nn.LayerNorm,
pretrained_window_size=[0, 0],
pretrained_stripe_size=[0, 0],
conv_type="1conv",
init_method="",
fairscale_checkpoint=False,
offload_to_cpu=False,
args: GRLConfig = None, # type: ignore
):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.init_method = init_method
self.blocks = nn.ModuleList()
for i in range(depth):
block = EfficientMixAttnTransformerBlock(
dim=dim,
input_resolution=input_resolution,
num_heads_w=num_heads_window,
num_heads_s=num_heads_stripe,
window_size=window_size,
window_shift=i % 2 == 0,
stripe_size=stripe_size,
stripe_groups=stripe_groups,
stripe_type="H" if i % 2 == 0 else "W",
stripe_shift=i % 4 in [2, 3] if stripe_shift else False,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qkv_proj_type=qkv_proj_type,
anchor_proj_type=anchor_proj_type,
anchor_one_stage=anchor_one_stage,
anchor_window_down_factor=anchor_window_down_factor,
drop=drop,
attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer,
pretrained_window_size=pretrained_window_size,
pretrained_stripe_size=pretrained_stripe_size,
res_scale=0.1 if init_method == "r" else 1.0,
args=args,
)
# print(fairscale_checkpoint, offload_to_cpu)
if fairscale_checkpoint:
block = checkpoint_wrapper(block, offload_to_cpu=offload_to_cpu)
self.blocks.append(block)
self.conv = build_last_conv(conv_type, dim)
def _init_weights(self):
for n, m in self.named_modules():
if self.init_method == "w":
if isinstance(m, (nn.Linear, nn.Conv2d)) and n.find("cpb_mlp") < 0:
print("nn.Linear and nn.Conv2d weight initilization")
m.weight.data *= 0.1
elif self.init_method == "l":
if isinstance(m, nn.LayerNorm):
print("nn.LayerNorm initialization")
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 0)
elif self.init_method.find("t") >= 0:
scale = 0.1 ** (len(self.init_method) - 1) * int(self.init_method[-1])
if isinstance(m, nn.Linear) and n.find("cpb_mlp") < 0:
trunc_normal_(m.weight, std=scale)
elif isinstance(m, nn.Conv2d):
m.weight.data *= 0.1
print(
"Initialization nn.Linear - trunc_normal; nn.Conv2d - weight rescale."
)
else:
raise NotImplementedError(
f"Parameter initialization method {self.init_method} not implemented in TransformerStage."
)
def forward(self, x, x_size, table_index_mask):
res = x
for blk in self.blocks:
res = blk(res, x_size, table_index_mask)
res = bchw_to_blc(self.conv(blc_to_bchw(res, x_size)))
return res + x
def flops(self):
pass
class GRL(nn.Module):
r"""Image restoration transformer with global, non-local, and local connections
Args:
img_size (int | list[int]): Input image size. Default 64
in_channels (int): Number of input image channels. Default: 3
out_channels (int): Number of output image channels. Default: None
embed_dim (int): Patch embedding dimension. Default: 96
upscale (int): Upscale factor. 2/3/4/8 for image SR, 1 for denoising and compress artifact reduction
img_range (float): Image range. 1. or 255.
upsampler (str): The reconstruction reconstruction module. 'pixelshuffle'/'pixelshuffledirect'/'nearest+conv'/None
depths (list[int]): Depth of each Swin Transformer layer.
num_heads_window (list[int]): Number of window attention heads in different layers.
num_heads_stripe (list[int]): Number of stripe attention heads in different layers.
window_size (int): Window size. Default: 8.
stripe_size (list[int]): Stripe size. Default: [8, 8]
stripe_groups (list[int]): Number of stripe groups. Default: [None, None].
stripe_shift (bool): whether to shift the stripes. This is used as an ablation study.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
qkv_proj_type (str): QKV projection type. Default: linear. Choices: linear, separable_conv.
anchor_proj_type (str): Anchor projection type. Default: avgpool. Choices: avgpool, maxpool, conv2d, separable_conv, patchmerging.
anchor_one_stage (bool): Whether to use one operator or multiple progressive operators to reduce feature map resolution. Default: True.
anchor_window_down_factor (int): The downscale factor used to get the anchors.
out_proj_type (str): Type of the output projection in the self-attention modules. Default: linear. Choices: linear, conv2d.
local_connection (bool): Whether to enable the local modelling module (two convs followed by Channel attention). For GRL base model, this is used.
drop_rate (float): Dropout rate. Default: 0
attn_drop_rate (float): Attention dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
pretrained_window_size (list[int]): pretrained window size. This is actually not used. Default: [0, 0].
pretrained_stripe_size (list[int]): pretrained stripe size. This is actually not used. Default: [0, 0].
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
conv_type (str): The convolutional block before residual connection. Default: 1conv. Choices: 1conv, 3conv, 1conv1x1, linear
init_method: initialization method of the weight parameters used to train large scale models.
Choices: n, normal -- Swin V1 init method.
l, layernorm -- Swin V2 init method. Zero the weight and bias in the post layer normalization layer.
r, res_rescale -- EDSR rescale method. Rescale the residual blocks with a scaling factor 0.1
w, weight_rescale -- MSRResNet rescale method. Rescale the weight parameter in residual blocks with a scaling factor 0.1
t, trunc_normal_ -- nn.Linear, trunc_normal; nn.Conv2d, weight_rescale
fairscale_checkpoint (bool): Whether to use fairscale checkpoint.
offload_to_cpu (bool): used by fairscale_checkpoint
euclidean_dist (bool): use Euclidean distance or inner product as the similarity metric. An ablation study.
"""
def __init__(
self,
img_size=64,
in_channels: int = 3,
out_channels: int | None = None,
embed_dim=96,
upscale=2,
img_range=1.0,
upsampler="",
depths: list[int] = [6, 6, 6, 6, 6, 6],
num_heads_window: list[int] = [3, 3, 3, 3, 3, 3],
num_heads_stripe: list[int] = [3, 3, 3, 3, 3, 3],
window_size=8,
stripe_size: list[int] = [8, 8], # used for stripe window attention
stripe_groups: list[int | None] = [None, None],
stripe_shift=False,
mlp_ratio=4.0,
qkv_bias=True,
qkv_proj_type="linear",
anchor_proj_type="avgpool",
anchor_one_stage=True,
anchor_window_down_factor=1,
out_proj_type: Literal["linear", "conv2d"] = "linear",
local_connection=False,
drop_rate=0.0,
attn_drop_rate=0.0,
drop_path_rate=0.1,
norm_layer=nn.LayerNorm,
pretrained_window_size: list[int] = [0, 0],
pretrained_stripe_size: list[int] = [0, 0],
conv_type="1conv",
init_method="n", # initialization method of the weight parameters used to train large scale models.
fairscale_checkpoint=False, # fairscale activation checkpointing
offload_to_cpu=False,
euclidean_dist=False,
):
super().__init__()
# Process the input arguments
out_channels = out_channels or in_channels
self.in_channels = in_channels
self.out_channels = out_channels
num_out_feats = 64
self.embed_dim = embed_dim
self.upscale = upscale
self.upsampler = upsampler
self.img_range = img_range
if in_channels == 3:
rgb_mean = (0.4488, 0.4371, 0.4040)
self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1)
else:
self.mean = torch.zeros(1, 1, 1, 1)
max_stripe_size = max([0 if s is None else s for s in stripe_size]) # type: ignore
max_stripe_groups = max([0 if s is None else s for s in stripe_groups])
max_stripe_groups *= anchor_window_down_factor
self.pad_size = max(window_size, max_stripe_size, max_stripe_groups)
# if max_stripe_size >= window_size:
# self.pad_size *= anchor_window_down_factor
# if stripe_groups[0] is None and stripe_groups[1] is None:
# self.pad_size = max(stripe_size)
# else:
# self.pad_size = window_size
self.input_resolution = to_2tuple(img_size)
self.window_size = to_2tuple(window_size)
self.shift_size = [w // 2 for w in self.window_size]
self.stripe_size = stripe_size
self.stripe_groups = stripe_groups
self.pretrained_window_size = pretrained_window_size
self.pretrained_stripe_size = pretrained_stripe_size
self.anchor_window_down_factor = anchor_window_down_factor
# Head of the network. First convolution.
self.conv_first = nn.Conv2d(in_channels, embed_dim, 3, 1, 1)
# Body of the network
self.norm_start = norm_layer(embed_dim)
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]
# stochastic depth decay rule
args = GRLConfig(
out_proj_type=out_proj_type,
local_connection=local_connection,
euclidean_dist=euclidean_dist,
)
for k, v in self.set_table_index_mask(self.input_resolution).items():
self.register_buffer(k, v, persistent=False)
self.layers = nn.ModuleList()
for i in range(len(depths)):
layer = TransformerStage(
dim=embed_dim,
input_resolution=self.input_resolution,
depth=depths[i],
num_heads_window=num_heads_window[i],
num_heads_stripe=num_heads_stripe[i],
window_size=self.window_size,
stripe_size=stripe_size,
stripe_groups=stripe_groups,
stripe_shift=stripe_shift,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qkv_proj_type=qkv_proj_type,
anchor_proj_type=anchor_proj_type,
anchor_one_stage=anchor_one_stage,
anchor_window_down_factor=anchor_window_down_factor,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[
sum(depths[:i]) : sum(
depths[: i + 1]
) # type: ignore
], # no impact on SR results
norm_layer=norm_layer,
pretrained_window_size=pretrained_window_size,
pretrained_stripe_size=pretrained_stripe_size,
conv_type=conv_type,
init_method=init_method,
fairscale_checkpoint=fairscale_checkpoint,
offload_to_cpu=offload_to_cpu,
args=args,
)
self.layers.append(layer)
self.norm_end = norm_layer(embed_dim)
# Tail of the network
self.conv_after_body = build_last_conv(conv_type, embed_dim)
#####################################################################################################
################################ 3, high quality image reconstruction ################################
if self.upsampler == "pixelshuffle":
# for classical SR
self.conv_before_upsample = nn.Sequential(
nn.Conv2d(embed_dim, num_out_feats, 3, 1, 1), nn.LeakyReLU(inplace=True)
)
self.upsample = Upsample(upscale, num_out_feats)
self.conv_last = nn.Conv2d(num_out_feats, out_channels, 3, 1, 1)
elif self.upsampler == "pixelshuffledirect":
# for lightweight SR (to save parameters)
self.upsample = UpsampleOneStep(
upscale,
embed_dim,
out_channels,
)
elif self.upsampler == "nearest+conv":
# for real-world SR (less artifacts)
assert self.upscale == 4, "only support x4 now."
self.conv_before_upsample = nn.Sequential(
nn.Conv2d(embed_dim, num_out_feats, 3, 1, 1), nn.LeakyReLU(inplace=True)
)
self.conv_up1 = nn.Conv2d(num_out_feats, num_out_feats, 3, 1, 1)
self.conv_up2 = nn.Conv2d(num_out_feats, num_out_feats, 3, 1, 1)
self.conv_hr = nn.Conv2d(num_out_feats, num_out_feats, 3, 1, 1)
self.conv_last = nn.Conv2d(num_out_feats, out_channels, 3, 1, 1)
self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
else:
# for image denoising and JPEG compression artifact reduction
self.conv_last = nn.Conv2d(embed_dim, out_channels, 3, 1, 1)
self.apply(self._init_weights)
if init_method in ["l", "w"] or init_method.find("t") >= 0:
for layer in self.layers:
layer._init_weights()
def set_table_index_mask(self, x_size: tuple[int, int]):
"""
Two used cases:
1) At initialization: set the shared buffers.
2) During forward pass: get the new buffers if the resolution of the input changes
"""
# ss - stripe_size, sss - stripe_shift_size
# ss ~= self.stripe_size
# sss ~= self.stripe_size / 2
ss, sss = get_stripe_info(self.stripe_size, self.stripe_groups, True, x_size)
df = self.anchor_window_down_factor
table_w = get_relative_coords_table_all(
self.window_size, self.pretrained_window_size
)
table_sh = get_relative_coords_table_all(ss, self.pretrained_stripe_size, df)
table_sv = get_relative_coords_table_all(
ss[::-1], self.pretrained_stripe_size, df
)
| index_w = get_relative_position_index_simple(self.window_size) | 10 | 2023-11-17 01:11:47+00:00 | 12k |
motexture/VSeq2VSeq | models/unet.py | [
{
"identifier": "TransformerTemporalModel",
"path": "models/transformers.py",
"snippet": "class TransformerTemporalModel(ModelMixin, ConfigMixin):\n @register_to_config\n def __init__(\n self,\n num_attention_heads: int = 16,\n attention_head_dim: int = 88,\n in_channel... | from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union
from safetensors.torch import load_file
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.utils import BaseOutput, logging
from diffusers.models.embeddings import TimestepEmbedding, Timesteps
from diffusers.models.modeling_utils import ModelMixin
from diffusers.utils import WEIGHTS_NAME
from .transformers import TransformerTemporalModel
from .resnet import Conditioner
from .unet_blocks import (
CrossAttnDownBlock3D,
CrossAttnUpBlock3D,
DownBlock3D,
UNetMidBlock3DCrossAttn,
UpBlock3D,
get_down_block,
get_up_block
)
import torch
import torch.nn as nn
import torch.utils.checkpoint
import os | 10,463 |
# count how many layers upsample the images
self.num_upsamplers = 0
# up
reversed_block_out_channels = list(reversed(block_out_channels))
reversed_attention_head_dim = list(reversed(attention_head_dim))
output_channel = reversed_block_out_channels[0]
for i, up_block_type in enumerate(up_block_types):
is_final_block = i == len(block_out_channels) - 1
prev_output_channel = output_channel
output_channel = reversed_block_out_channels[i]
input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
if not is_final_block:
add_upsample = True
self.num_upsamplers += 1
else:
add_upsample = False
up_block = get_up_block(
up_block_type,
num_layers=layers_per_block + 1,
in_channels=input_channel,
out_channels=output_channel,
prev_output_channel=prev_output_channel,
temb_channels=time_embed_dim,
add_upsample=add_upsample,
resnet_eps=norm_eps,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim,
attn_num_head_channels=reversed_attention_head_dim[i]
)
self.up_blocks.append(up_block)
prev_output_channel = output_channel
# out
if norm_num_groups is not None:
self.conv_norm_out = nn.GroupNorm(
num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps
)
self.conv_act = nn.SiLU()
else:
self.conv_norm_out = None
self.conv_act = None
conv_out_padding = (conv_out_kernel - 1) // 2
self.conv_out = Conditioner(block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding)
def set_attention_slice(self, slice_size):
r"""
Enable sliced attention computation.
When this option is enabled, the attention module will split the input tensor in slices, to compute attention
in several steps. This is useful to save some memory in exchange for a small speed decrease.
Args:
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
`"max"`, maxium amount of memory will be saved by running only one slice at a time. If a number is
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
must be a multiple of `slice_size`.
"""
sliceable_head_dims = []
def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):
if hasattr(module, "set_attention_slice"):
sliceable_head_dims.append(module.sliceable_head_dim)
for child in module.children():
fn_recursive_retrieve_slicable_dims(child)
# retrieve number of attention layers
for module in self.children():
fn_recursive_retrieve_slicable_dims(module)
num_slicable_layers = len(sliceable_head_dims)
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = [dim // 2 for dim in sliceable_head_dims]
elif slice_size == "max":
# make smallest slice possible
slice_size = num_slicable_layers * [1]
slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
if len(slice_size) != len(sliceable_head_dims):
raise ValueError(
f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
)
for i in range(len(slice_size)):
size = slice_size[i]
dim = sliceable_head_dims[i]
if size is not None and size > dim:
raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
# Recursively walk through all the children.
# Any children which exposes the set_attention_slice method
# gets the message
def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
if hasattr(module, "set_attention_slice"):
module.set_attention_slice(slice_size.pop())
for child in module.children():
fn_recursive_set_attention_slice(child, slice_size)
reversed_slice_size = list(reversed(slice_size))
for module in self.children():
fn_recursive_set_attention_slice(module, reversed_slice_size)
def _set_gradient_checkpointing(self, value=False):
self.gradient_checkpointing = value
self.mid_block.gradient_checkpointing = value
for module in self.down_blocks + self.up_blocks:
| # Copyright 2023 Alibaba DAMO-VILAB and The HuggingFace Team. All rights reserved.
# Copyright 2023 The ModelScope Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Module-scoped logger (named after this module so records identify their origin).
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
@dataclass
class UNet3DConditionOutput(BaseOutput):
    """
    Output container returned by `UNet3DConditionModel`.

    Args:
        sample (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`):
            Hidden states conditioned on `encoder_hidden_states` input. Output of last layer of model.
    """

    # Output tensor of the UNet's final layer (shape documented above).
    sample: torch.FloatTensor
class UNet3DConditionModel(ModelMixin, ConfigMixin):
r"""
UNet3DConditionModel is a conditional 2D UNet model that takes in a noisy sample, conditional state, and a timestep
and returns sample shaped output.
This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library
implements for all the models (such as downloading or saving, etc.)
Parameters:
sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):
Height and width of input/output sample.
in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample.
out_channels (`int`, *optional*, defaults to 4): The number of channels in the output.
down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
The tuple of downsample blocks to use.
up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D",)`):
The tuple of upsample blocks to use.
block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
The tuple of output channels for each block.
layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.
downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.
mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.
act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.
If `None`, it will skip the normalization and activation layers in post-processing
norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.
cross_attention_dim (`int`, *optional*, defaults to 1280): The dimension of the cross attention features.
attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.
"""
_supports_gradient_checkpointing = True
    @register_to_config
    def __init__(
        self,
        sample_size: Optional[int] = None,
        in_channels: int = 4,
        out_channels: int = 4,
        down_block_types: Tuple[str] = (
            "CrossAttnDownBlock3D",
            "CrossAttnDownBlock3D",
            "CrossAttnDownBlock3D",
            "DownBlock3D",
        ),
        up_block_types: Tuple[str] = ("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
        block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
        layers_per_block: int = 2,
        downsample_padding: int = 1,
        mid_block_scale_factor: float = 1,
        act_fn: str = "silu",
        norm_num_groups: Optional[int] = 32,
        norm_eps: float = 1e-5,
        cross_attention_dim: int = 1024,
        attention_head_dim: Union[int, Tuple[int]] = 64,
    ):
        """Build the 3D UNet: input conv, time embeddings, temporal transformer,
        down blocks, mid block, up blocks and output head.

        Parameter semantics are documented in the class docstring; this method
        only validates the per-block configuration lengths and instantiates
        the sub-modules in top-to-bottom order.
        """
        super().__init__()
        self.sample_size = sample_size
        self.gradient_checkpointing = False
        # Check inputs: the three per-block configuration tuples must agree in length.
        if len(down_block_types) != len(up_block_types):
            raise ValueError(
                f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}."
            )
        if len(block_out_channels) != len(down_block_types):
            raise ValueError(
                f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
            )
        if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):
            raise ValueError(
                f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}."
            )
        # input projection (note: ``Conditioner`` rather than a plain Conv2d —
        # presumably a project-specific conditioned convolution; confirm in its definition)
        conv_in_kernel = 3
        conv_out_kernel = 3
        conv_in_padding = (conv_in_kernel - 1) // 2
        self.conv_in = Conditioner(in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding)
        # time: sinusoidal projection followed by two separate MLP embeddings
        # (one for the hidden/denoising timestep, one for the conditioning timestep)
        time_embed_dim = block_out_channels[0] * 4
        self.time_proj = Timesteps(block_out_channels[0], True, 0)
        timestep_input_dim = block_out_channels[0]
        self.hidden_time_embedding = TimestepEmbedding(
            timestep_input_dim,
            time_embed_dim,
            act_fn=act_fn,
        )
        self.conditioning_time_embedding = TimestepEmbedding(
            timestep_input_dim,
            time_embed_dim,
            act_fn=act_fn,
        )
        # temporal transformer applied right after the input conv
        self.transformer_in = TransformerTemporalModel(
            num_attention_heads=8,
            attention_head_dim=attention_head_dim,
            in_channels=block_out_channels[0],
            num_layers=1,
        )
        # class embedding (none is created here)
        self.down_blocks = nn.ModuleList([])
        self.up_blocks = nn.ModuleList([])
        # normalize a scalar head dim into one entry per down block
        if isinstance(attention_head_dim, int):
            attention_head_dim = (attention_head_dim,) * len(down_block_types)
        # down path: each block halves resolution except the last one
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=time_embed_dim,
                add_downsample=not is_final_block,
                resnet_eps=norm_eps,
                resnet_groups=norm_num_groups,
                cross_attention_dim=cross_attention_dim,
                attn_num_head_channels=attention_head_dim[i],
                downsample_padding=downsample_padding
            )
            self.down_blocks.append(down_block)
        # mid block at the bottleneck resolution
        self.mid_block = UNetMidBlock3DCrossAttn(
            in_channels=block_out_channels[-1],
            temb_channels=time_embed_dim,
            resnet_eps=norm_eps,
            output_scale_factor=mid_block_scale_factor,
            cross_attention_dim=cross_attention_dim,
            attn_num_head_channels=attention_head_dim[-1],
            resnet_groups=norm_num_groups
        )
        # count how many layers upsample the images
        self.num_upsamplers = 0
        # up path mirrors the down path with reversed channel counts
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_attention_head_dim = list(reversed(attention_head_dim))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            is_final_block = i == len(block_out_channels) - 1
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
            if not is_final_block:
                add_upsample = True
                self.num_upsamplers += 1
            else:
                add_upsample = False
            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block + 1,
                in_channels=input_channel,
                out_channels=output_channel,
                prev_output_channel=prev_output_channel,
                temb_channels=time_embed_dim,
                add_upsample=add_upsample,
                resnet_eps=norm_eps,
                resnet_groups=norm_num_groups,
                cross_attention_dim=cross_attention_dim,
                attn_num_head_channels=reversed_attention_head_dim[i]
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel
        # output head: optional group-norm + SiLU, then the final projection
        if norm_num_groups is not None:
            self.conv_norm_out = nn.GroupNorm(
                num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps
            )
            self.conv_act = nn.SiLU()
        else:
            self.conv_norm_out = None
            self.conv_act = None
        conv_out_padding = (conv_out_kernel - 1) // 2
        self.conv_out = Conditioner(block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding)
def set_attention_slice(self, slice_size):
r"""
Enable sliced attention computation.
When this option is enabled, the attention module will split the input tensor in slices, to compute attention
in several steps. This is useful to save some memory in exchange for a small speed decrease.
Args:
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
`"max"`, maxium amount of memory will be saved by running only one slice at a time. If a number is
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
must be a multiple of `slice_size`.
"""
sliceable_head_dims = []
def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):
if hasattr(module, "set_attention_slice"):
sliceable_head_dims.append(module.sliceable_head_dim)
for child in module.children():
fn_recursive_retrieve_slicable_dims(child)
# retrieve number of attention layers
for module in self.children():
fn_recursive_retrieve_slicable_dims(module)
num_slicable_layers = len(sliceable_head_dims)
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = [dim // 2 for dim in sliceable_head_dims]
elif slice_size == "max":
# make smallest slice possible
slice_size = num_slicable_layers * [1]
slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
if len(slice_size) != len(sliceable_head_dims):
raise ValueError(
f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
)
for i in range(len(slice_size)):
size = slice_size[i]
dim = sliceable_head_dims[i]
if size is not None and size > dim:
raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
# Recursively walk through all the children.
# Any children which exposes the set_attention_slice method
# gets the message
def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
if hasattr(module, "set_attention_slice"):
module.set_attention_slice(slice_size.pop())
for child in module.children():
fn_recursive_set_attention_slice(child, slice_size)
reversed_slice_size = list(reversed(slice_size))
for module in self.children():
fn_recursive_set_attention_slice(module, reversed_slice_size)
def _set_gradient_checkpointing(self, value=False):
self.gradient_checkpointing = value
self.mid_block.gradient_checkpointing = value
for module in self.down_blocks + self.up_blocks: | if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)): | 2 | 2023-11-14 09:09:09+00:00 | 12k |
TCLResearchEurope/torch-dag | torch_dag_algorithms/pruning/masking_inserter.py | [
{
"identifier": "DagModule",
"path": "torch_dag/core/dag_module.py",
"snippet": "class DagModule(torch.nn.Module):\n MAX_LEN_REPR = None\n\n def __init__(\n self,\n name: str,\n vertices: Optional[List[Vertex]] = None,\n output_vertex: Optional[InnerVert... | import logging
from typing import Optional, Union
from torch_dag.core.dag_module import DagModule
from torch_dag_algorithms.pruning.modules import MaskModule, OrbitModule
from torch_dag_algorithms.pruning import commons
from torch_dag_algorithms.pruning.commons import get_source_out_channels
from torch_dag_algorithms.pruning.masking_insertion_strategy import MaskingInsertionStrategy
from torch_dag_algorithms.pruning.orbit import Orbit
from torch_dag.core import dag_module_utils | 8,136 |
# Module-level logger used by the mask-insertion helpers.
logger = logging.getLogger(__name__)
class MaskInserter:
def __init__(
self,
masking_strategy: MaskingInsertionStrategy,
block_size: Optional[int] = None
):
self.masking_strategy = masking_strategy
self.block_size = block_size
def insert_masking(
self,
|
# Module-level logger used by the mask-insertion helpers.
logger = logging.getLogger(__name__)
class MaskInserter:
def __init__(
self,
masking_strategy: MaskingInsertionStrategy,
block_size: Optional[int] = None
):
self.masking_strategy = masking_strategy
self.block_size = block_size
def insert_masking(
self, | dag: DagModule, | 0 | 2023-11-17 15:36:44+00:00 | 12k |
newcastleuniversity/DISPEL | dispel/providers/generic/preprocessing.py | [
{
"identifier": "Level",
"path": "dispel/data/levels.py",
"snippet": "class Level(Epoch):\n \"\"\"An entity to separate sub-task inside each test (Levels).\n\n FIXME: DOC\n\n Attributes\n ----------\n context\n Contextual information about the level\n measure_set\n A :cla... | from typing import Iterable, List, Optional, Set, Tuple
from dispel.data.levels import Level
from dispel.data.raw import DEFAULT_COLUMNS, GRAVITY_COLUMNS
from dispel.processing import ProcessingStep
from dispel.processing.level import (
DefaultLevelFilter,
LevelFilter,
LevelFilterType,
LevelIdFilter,
ProcessingStepGroup,
)
from dispel.processing.modalities import LimbModality, SensorModality
from dispel.processing.transform import Apply
from dispel.providers.generic.sensor import (
ComputeGravityRotationMatrices,
Resample,
RotateSensorWithGravityRotationMatrices,
SetTimestampIndex,
TransformGyroscope,
TransformUserAcceleration,
)
from dispel.signal.filter import butterworth_high_pass_filter, savgol_filter
from dispel.signal.sensor import check_amplitude, detrend_signal | 10,636 | """Core functionalities to preprocess signal data."""
class FilterSensorNoise(Apply):
r"""Apply a filter that will remove any sensor noise into a given dataset.
This filter is a Savitzky-Golay one.
Parameters
----------
data_set_id
The data set id on which the transformation is to be performed ('accelerometer',
'gyroscope').
columns
The columns onto which the filtering step has to be applied.
kwargs
Additional arguments that are passed to the
:meth:`~dispel.processing.core.ProcessingStep.process` function of each step. This
allows to provide additional values, such as placeholder values in value
definitions to the actual processing function.
Notes
-----
The Savitzky-Golay is tuned as in [Martinez et. al. 2012]_ to remove sensor noise
and to smooth the signal. The windows size is thus set up to 41 points and the
filter is of order-3.
"""
def __init__(self, data_set_id: str, columns: Optional[List[str]] = None, **kwargs):
| """Core functionalities to preprocess signal data."""
class FilterSensorNoise(Apply):
r"""Apply a filter that will remove any sensor noise into a given dataset.
This filter is a Savitzky-Golay one.
Parameters
----------
data_set_id
The data set id on which the transformation is to be performed ('accelerometer',
'gyroscope').
columns
The columns onto which the filtering step has to be applied.
kwargs
Additional arguments that are passed to the
:meth:`~dispel.processing.core.ProcessingStep.process` function of each step. This
allows to provide additional values, such as placeholder values in value
definitions to the actual processing function.
Notes
-----
The Savitzky-Golay is tuned as in [Martinez et. al. 2012]_ to remove sensor noise
and to smooth the signal. The windows size is thus set up to 41 points and the
filter is of order-3.
"""
def __init__(self, data_set_id: str, columns: Optional[List[str]] = None, **kwargs): | columns = columns or DEFAULT_COLUMNS | 1 | 2023-11-14 10:06:46+00:00 | 12k |
NevermindNilas/TheAnimeScripter | src/segment/train.py | [
{
"identifier": "InSPyReNet",
"path": "src/segment/model/inspyrenet.py",
"snippet": "class InSPyReNet(nn.Module):\n def __init__(\n self,\n backbone,\n in_channels,\n depth=64,\n base_size=(384, 384),\n threshold: Optional[int] = 512,\n **kwargs,\n ... | import pytorch_lightning as pl
import torch
import torch.nn.functional as F
import torch.optim as optim
from pytorch_lightning import Trainer
from .model import ISNetDIS, ISNetGTEncoder, U2NET, U2NET_full2, U2NET_lite2, MODNet \
, InSPyReNet, InSPyReNet_Res2Net50, InSPyReNet_SwinB | 8,241 |
# warnings.filterwarnings("ignore")
# All backbone identifiers accepted by get_net() / AnimeSegmentation.
net_names = ["isnet_is", "isnet", "isnet_gt", "u2net", "u2netl", "modnet", "inspyrnet_res", "inspyrnet_swin"]
def get_net(net_name, img_size):
    """Instantiate the segmentation backbone selected by ``net_name``.

    Args:
        net_name: one of the identifiers listed in ``net_names``.
        img_size: base input size; only used by the InSPyReNet variants.

    Returns:
        A freshly constructed network module.

    Raises:
        NotImplementedError: if ``net_name`` is not a known architecture.
    """
    if net_name == "isnet":
        return ISNetDIS()
    if net_name == "isnet_is":
        return ISNetDIS()
    if net_name == "isnet_gt":
        return ISNetGTEncoder()
    if net_name == "u2net":
        return U2NET_full2()
    if net_name == "u2netl":
        return U2NET_lite2()
    if net_name == "modnet":
        return MODNet()
    if net_name == "inspyrnet_res":
        return InSPyReNet_Res2Net50(base_size=img_size)
    if net_name == "inspyrnet_swin":
        return InSPyReNet_SwinB(base_size=img_size)
    # BUGFIX: ``raise NotImplemented`` raises a TypeError in Python 3 because
    # NotImplemented is a constant, not an exception; raise the proper type.
    raise NotImplementedError(f"unknown net_name: {net_name!r}")
class AnimeSegmentation(pl.LightningModule):
def __init__(self, net_name, img_size=None, lr=1e-3):
super().__init__()
assert net_name in net_names
self.img_size = img_size
self.lr = lr
self.net = get_net(net_name, img_size)
if net_name == "isnet_is":
self.gt_encoder = get_net("isnet_gt", img_size)
self.gt_encoder.requires_grad_(False)
else:
self.gt_encoder = None
@classmethod
def try_load(cls, net_name, ckpt_path, map_location=None, img_size=None):
state_dict = torch.load(ckpt_path, map_location=map_location)
if "epoch" in state_dict:
return cls.load_from_checkpoint(ckpt_path, net_name=net_name, img_size=img_size, map_location=map_location)
else:
model = cls(net_name, img_size)
if any([k.startswith("net.") for k, v in state_dict.items()]):
model.load_state_dict(state_dict)
else:
model.net.load_state_dict(state_dict)
return model
def configure_optimizers(self):
optimizer = optim.Adam(self.net.parameters(), lr=self.lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
return optimizer
def forward(self, x):
if isinstance(self.net, ISNetDIS):
return self.net(x)[0][0].sigmoid()
if isinstance(self.net, ISNetGTEncoder):
return self.net(x)[0][0].sigmoid()
|
# warnings.filterwarnings("ignore")
# All backbone identifiers accepted by get_net() / AnimeSegmentation.
net_names = ["isnet_is", "isnet", "isnet_gt", "u2net", "u2netl", "modnet", "inspyrnet_res", "inspyrnet_swin"]
def get_net(net_name, img_size):
    """Instantiate the segmentation backbone selected by ``net_name``.

    Args:
        net_name: one of the identifiers listed in ``net_names``.
        img_size: base input size; only used by the InSPyReNet variants.

    Returns:
        A freshly constructed network module.

    Raises:
        NotImplementedError: if ``net_name`` is not a known architecture.
    """
    if net_name == "isnet":
        return ISNetDIS()
    if net_name == "isnet_is":
        return ISNetDIS()
    if net_name == "isnet_gt":
        return ISNetGTEncoder()
    if net_name == "u2net":
        return U2NET_full2()
    if net_name == "u2netl":
        return U2NET_lite2()
    if net_name == "modnet":
        return MODNet()
    if net_name == "inspyrnet_res":
        return InSPyReNet_Res2Net50(base_size=img_size)
    if net_name == "inspyrnet_swin":
        return InSPyReNet_SwinB(base_size=img_size)
    # BUGFIX: ``raise NotImplemented`` raises a TypeError in Python 3 because
    # NotImplemented is a constant, not an exception; raise the proper type.
    raise NotImplementedError(f"unknown net_name: {net_name!r}")
class AnimeSegmentation(pl.LightningModule):
def __init__(self, net_name, img_size=None, lr=1e-3):
super().__init__()
assert net_name in net_names
self.img_size = img_size
self.lr = lr
self.net = get_net(net_name, img_size)
if net_name == "isnet_is":
self.gt_encoder = get_net("isnet_gt", img_size)
self.gt_encoder.requires_grad_(False)
else:
self.gt_encoder = None
@classmethod
def try_load(cls, net_name, ckpt_path, map_location=None, img_size=None):
state_dict = torch.load(ckpt_path, map_location=map_location)
if "epoch" in state_dict:
return cls.load_from_checkpoint(ckpt_path, net_name=net_name, img_size=img_size, map_location=map_location)
else:
model = cls(net_name, img_size)
if any([k.startswith("net.") for k, v in state_dict.items()]):
model.load_state_dict(state_dict)
else:
model.net.load_state_dict(state_dict)
return model
def configure_optimizers(self):
optimizer = optim.Adam(self.net.parameters(), lr=self.lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
return optimizer
def forward(self, x):
if isinstance(self.net, ISNetDIS):
return self.net(x)[0][0].sigmoid()
if isinstance(self.net, ISNetGTEncoder):
return self.net(x)[0][0].sigmoid() | elif isinstance(self.net, U2NET): | 6 | 2023-11-14 22:10:11+00:00 | 12k |
chuzhumin98/LLM_Eval | PRE/process.py | [
{
"identifier": "DataLoader",
"path": "PRE/data.py",
"snippet": "class DataLoader:\n '''\n The loader to load for evaluated task, with given prompt template to generate a series of prompts feeding for each LLM\n '''\n def __init__(self, args):\n self.path_data = args['path_data'] # th... | import os
import yaml
import json, csv
import copy
import sys
from PRE.data import DataLoader
from PRE.api import Auto_API
from PRE.exam import EXAM
from PRE.eval import PRE | 7,709 | '''
The procedure of the whole peer review framework
'''
# Make the repository root importable when this module is run directly.
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(base_dir)
class Process:
'''
The control of the whole peer review process
'''
@staticmethod
def run(args): # the API used for automatic evaluation
Process.collect_task_response(args)
qualified_apis, scores_qualified = Process.conduct_qualified_exam(args)
args['config_evaluators'] = qualified_apis
args['scores_evaluators'] = scores_qualified
# print(scores_qualified)
Process.peer_review_and_evaluate(args)
return None
@staticmethod
def collect_task_response(args):
path_config_api_evaluatee = args['config_api_evaluatee']
path_config_task_data = args['config_task_data']
task_name = args['task_name']
save_dir = args['save_dir'] # the task result save dir, the task save filename = [save_dir] / task_responses / [task_name]_[model_name].json, each line is one result with json {response: str}
os.makedirs(os.path.join(save_dir, "task_responses"), exist_ok=True)
if not os.path.exists(path_config_api_evaluatee):
raise FileExistsError("Load api_evaluatee config failed: file not exist!")
if not os.path.exists(path_config_task_data):
raise FileExistsError("Load task_data config failed: file not exist!")
config_apis = yaml.load_all(open(path_config_api_evaluatee, 'r'), Loader=yaml.FullLoader) # series of APIs
config_task = yaml.load(open(path_config_task_data, 'r'), Loader=yaml.FullLoader) # single task config
process_num = args['process_num'] # multi-process or not
| '''
The procedure of the whole peer review framework
'''
# Make the repository root importable when this module is run directly.
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(base_dir)
class Process:
'''
The control of the whole peer review process
'''
@staticmethod
def run(args): # the API used for automatic evaluation
Process.collect_task_response(args)
qualified_apis, scores_qualified = Process.conduct_qualified_exam(args)
args['config_evaluators'] = qualified_apis
args['scores_evaluators'] = scores_qualified
# print(scores_qualified)
Process.peer_review_and_evaluate(args)
return None
@staticmethod
def collect_task_response(args):
path_config_api_evaluatee = args['config_api_evaluatee']
path_config_task_data = args['config_task_data']
task_name = args['task_name']
save_dir = args['save_dir'] # the task result save dir, the task save filename = [save_dir] / task_responses / [task_name]_[model_name].json, each line is one result with json {response: str}
os.makedirs(os.path.join(save_dir, "task_responses"), exist_ok=True)
if not os.path.exists(path_config_api_evaluatee):
raise FileExistsError("Load api_evaluatee config failed: file not exist!")
if not os.path.exists(path_config_task_data):
raise FileExistsError("Load task_data config failed: file not exist!")
config_apis = yaml.load_all(open(path_config_api_evaluatee, 'r'), Loader=yaml.FullLoader) # series of APIs
config_task = yaml.load(open(path_config_task_data, 'r'), Loader=yaml.FullLoader) # single task config
process_num = args['process_num'] # multi-process or not
| data_loader = DataLoader(config_task) # a task data loader | 0 | 2023-11-16 18:40:23+00:00 | 12k |
believethehype/nostrdvm | nostr_dvm/utils/mediasource_utils.py | [
{
"identifier": "get_event_by_id",
"path": "nostr_dvm/utils/nostr_utils.py",
"snippet": "def get_event_by_id(event_id: str, client: Client, config=None) -> Event | None:\n split = event_id.split(\":\")\n if len(split) == 3:\n pk = PublicKey.from_hex(split[1])\n id_filter = Filter().a... | import os
import urllib
import ffmpegio
import requests
from datetime import time
from urllib.parse import urlparse
from decord import AudioReader, cpu
from nostr_dvm.utils.nostr_utils import get_event_by_id
from nostr_dvm.utils.scrapper.media_scrapper import OvercastDownload, XitterDownload, TiktokDownloadAll, \
InstagramDownload, YouTubeDownload
from moviepy.editor import VideoFileClip
from moviepy.editor import VideoFileClip | 9,121 |
def get_Instagram(input_value, start, end):
    """Download an Instagram post into ./outputs.

    Returns ``(filename, start, end)``; on any download error the exception
    is printed and the filename is empty so callers can bail out.
    """
    out_dir = os.path.abspath(os.curdir + r'/outputs/')
    try:
        local_file = download_instagram(input_value, out_dir)
        print(local_file)
    except Exception as e:
        print(e)
        return "", start, end
    return local_file, start, end
def get_Twitter(input_value, start, end):
    """Download an X/Twitter video into ./outputs.

    Returns ``(filename, start, end)``; the filename is empty on failure.
    """
    out_dir = os.path.abspath(os.curdir) + r'/outputs/'
    # The downloader expects the x.com domain, so rewrite legacy links.
    normalized_link = str(input_value).replace("twitter.com", "x.com")
    try:
        local_file = download_twitter(normalized_link, out_dir)
    except Exception as e:
        print(e)
        return "", start, end
    return local_file, start, end
def get_youtube(input_value, start, end, audioonly=True):
    """Download a YouTube video (or its audio) into ./outputs.

    When the caller passed no explicit start time (``start == 0.0``) and the
    URL carries a ``t=`` query parameter, the start time is taken from the
    link; if an explicit duration ``end`` was also given, the end time is
    shifted by the same offset.

    Returns ``(filename, start, end)``; ``filename`` is empty when the
    download failed.
    """
    filepath = os.path.abspath(os.curdir) + r'/outputs/'
    print(filepath)
    filename = ""
    try:
        filename = download_youtube(input_value, filepath, audioonly)
    except Exception as e:
        print("Youtube " + str(e))
        return filename, start, end
    try:
        parsed = urlparse(input_value)
        query = urllib.parse.parse_qs(parsed.query)
        # BUGFIX: ``parsed.query`` never contains '?', so the old
        # ``o.query.find('?t=')`` check could never match; look the parameter
        # up in the parsed query dict instead.
        if start == 0.0 and 't' in query:
            # BUGFIX: keep the start time numeric — the old code stored the
            # raw query string, which broke later float comparisons.
            start = float(query['t'][0])
            print("Setting start time automatically to " + str(start))
            # Only shift the end when the 't' parameter was actually found.
            if end > 0.0:
                end = start + end
                print("Moving end time automatically to " + str(end))
    except Exception as e:
        print(e)
        return filename, start, end
    return filename, start, end
def get_media_link(url) -> (str, str):
    """Fetch ``url`` and store it under ./outputs as ``file.<ext>``.

    The media kind is detected from the response's Content-Type header or,
    as a fallback, from the URL's file extension.

    Returns:
        ``(local_path, "audio" | "video")`` on success, or ``(None, None)``
        for unsupported media types.
    """
    req = requests.get(url)
    content_type = req.headers['content-type']
    print(content_type)
    # (content-type, url suffix, local extension, media kind) — first match wins.
    known_types = (
        ('audio/x-wav', '.wav', 'wav', 'audio'),
        ('audio/mpeg', '.mp3', 'mp3', 'audio'),
        ('audio/ogg', '.ogg', 'ogg', 'audio'),
        ('video/mp4', '.mp4', 'mp4', 'video'),
        ('video/avi', '.avi', 'avi', 'video'),
        ('video/quicktime', '.mov', 'mov', 'video'),
    )
    for mime, suffix, ext, file_type in known_types:
        if content_type == mime or str(url).lower().endswith(suffix):
            path = os.path.abspath(os.curdir + r'/outputs/' + 'file.' + ext)
            with open(path, 'wb') as fd:
                fd.write(req.content)
            return path, file_type
    print(str(url).lower())
    return None, None
def download_overcast(source_url, target_location):
    """Save an Overcast episode's audio file to ``target_location``."""
    return OvercastDownload(source_url, target_location)
def download_twitter(videourl, path):
    """Download an X/Twitter video; the file is written as ``<path>x.mp4``."""
    return XitterDownload(videourl, path + "x.mp4")
def download_tik_tok(videourl, path):
    """Download a single TikTok video into ``path``."""
    return TiktokDownloadAll([videourl], path)
def download_instagram(videourl, path):
    """Download an Instagram post into ``path`` (file name prefix 'insta')."""
    return InstagramDownload(videourl, "insta", path)
def download_youtube(link, path, audioonly=True):
|
def input_data_file_duration(event, dvm_config, client, start=0, end=0):
    """Estimate a length value for the input referenced by ``event``.

    The input is read from the event's ``i`` tag.  Text inputs yield their
    character count; URL inputs yield the (clamped) media duration in
    seconds.  Returns ``0`` when the media could not be fetched or decoded
    and ``1`` for inputs with no meaningful duration.
    """
    # print("[" + dvm_config.NIP89.NAME + "] Getting Duration of the Media file..")
    input_value = ""
    input_type = ""
    # The last 'i' tag wins when several are present.
    for tag in event.tags():
        if tag.as_vec()[0] == 'i':
            input_value = tag.as_vec()[1]
            input_type = tag.as_vec()[2]
    if input_type == "text":
        return len(input_value)
    if input_type == "event":  # NIP94 event
        # Resolve the referenced event; a NIP-94 event may carry a media URL.
        evt = get_event_by_id(input_value, client=client, config=dvm_config)
        if evt is not None:
            input_value, input_type = check_nip94_event_for_media(evt, input_value, input_type)
    if input_type == "text":
        # Text resolved from a referenced event is measured by character count.
        return len(input_value)
    if input_type == "url":
        source_type = check_source_type(input_value)
        # Download (audio-only) so the duration can be read locally.
        filename, start, end, type = get_file_start_end_type(input_value, source_type, start, end, True)
        if type != "audio" and type != "video":
            return 1
        if filename == "" or filename is None:
            return 0
        try:
            file_reader = AudioReader(filename, ctx=cpu(0), mono=False)
            duration = float(file_reader.duration())
        except Exception as e:
            print(e)
            return 0
        print("Original Duration of the Media file: " + str(duration))
        start_time, end_time, new_duration = (
            convert_media_length(start, end, duration))
        print("New Duration of the Media file: " + str(new_duration))
        return new_duration
    return 1
def organize_input_media_data(input_value, input_type, start, end, dvm_config, client, process=True,
                              media_format="audio/mp3") -> str:
    """Fetch the referenced media and optionally cut/convert it with ffmpeg.

    ``input_type`` may be ``"event"`` (a referenced event whose NIP-94 URL is
    resolved first) or ``"url"``.  When ``process`` is True the downloaded
    file is cut to ``start``/``end`` and converted to ``media_format``
    (e.g. ``audio/mp3``, ``video/mp4``, ``video/gif``); otherwise the raw
    download path is returned.  Returns ``""`` on failure.
    """
    if input_type == "event":  # NIP94 event
        evt = get_event_by_id(input_value, client=client, config=dvm_config)
        if evt is not None:
            input_value, input_type = check_nip94_event_for_media(evt, input_value, input_type)
    if input_type == "url":
        source_type = check_source_type(input_value)
        # Only fetch the video stream when the requested output is video.
        audio_only = True
        if media_format.split('/')[0] == "video":
            audio_only = False
        filename, start, end, type = get_file_start_end_type(input_value, source_type, start, end, audio_only)
        if filename == "" or filename is None:
            return ""
        if type != "audio" and type != "video":
            return filename
        try:
            file_reader = AudioReader(filename, ctx=cpu(0), mono=False)
            duration = float(file_reader.duration())
        except Exception as e:
            print(e)
            # Fall back to moviepy when decord can't read the file (e.g. video without audio).
            try:
                clip = VideoFileClip(filename)
                duration = clip.duration
            except Exception as e:
                print(e)
                return ""
        print("Original Duration of the Media file: " + str(duration))
        start_time, end_time, new_duration = (
            convert_media_length(start, end, duration))
        print("New Duration of the Media file: " + str(new_duration))
        # TODO if already in a working format and time is 0 0, dont convert
        # for now, we cut and convert all files to mp3
        if process:
            # for now we cut and convert all files to mp3
            file = r'processed.' + str(media_format.split('/')[1])
            final_filename = os.path.abspath(os.curdir + r'/outputs/' + file)
            if media_format.split('/')[0] == "audio":
                print("Converting Audio from " + str(start_time) + " until " + str(end_time))
                fs, x = ffmpegio.audio.read(filename, ss=start_time, to=end_time, sample_fmt='dbl', ac=1)
                ffmpegio.audio.write(final_filename, fs, x, overwrite=True)
            elif media_format.split('/')[0] == "video":
                print("Converting Video from " + str(start_time) + " until " + str(end_time))
                ffmpegio.transcode(filename, final_filename, overwrite=True, show_log=True)
            elif media_format.split('/')[1] == "gif":
                print("Converting Video from " + str(start_time) + " until " + str(end_time))
                videoClip = VideoFileClip(filename)
                videoClip.write_gif(final_filename, program="ffmpeg")
            print(final_filename)
            return final_filename
        else:
            return filename
    # NOTE(review): non-URL inputs fall through here and implicitly return None.
def check_nip94_event_for_media(evt, input_value, input_type):
    """If ``evt`` is a NIP-94 file event (kind 1063), extract its media URL.

    Returns ``(url, "url")`` for the first ``url`` tag found, otherwise the
    unchanged ``(input_value, input_type)`` pair.
    """
    if evt.kind() == 1063:
        for tag in evt.tags():
            vec = tag.as_vec()
            if vec[0] == 'url':
                return vec[1], "url"
    return input_value, input_type
def convert_media_length(start: float, end: float, duration: float):
    """Clamp user-supplied start/end times to the real media duration.

    ``end == 0.0`` means "until the end of the file"; a start that is not
    strictly inside ``(0, end_time]`` falls back to ``0.0``.

    Returns:
        Tuple ``(start_time, end_time, length)`` in seconds.
    """
    end_time = end if end != 0.0 and end <= duration else duration
    start_time = start if 0.0 < start <= end_time else 0.0
    return start_time, end_time, end_time - start_time
def get_file_start_end_type(url, source_type, start, end, audio_only=True) -> (str, str):
    """Dispatch the download to the handler for ``source_type``.

    Returns ``(filename, start, end, media_kind)`` where ``media_kind`` is
    ``"audio"``, ``"video"``, ``"image"`` or the kind detected for a plain
    file link.
    """
    if source_type == "overcast":
        filename, start, end = get_overcast(url, start, end)
        return filename, start, end, "audio"
    if source_type == "youtube":
        filename, start, end = get_youtube(url, start, end, audio_only)
        return filename, start, end, "audio"
    if source_type == "xitter":
        filename, start, end = get_Twitter(url, start, end)
        return filename, start, end, "video"
    if source_type == "tiktok":
        filename, start, end = get_TikTok(url, start, end)
        return filename, start, end, "video"
    if source_type == "instagram":
        filename, start, end = get_Instagram(url, start, end)
        # Instagram posts may be single images rather than videos.
        media_kind = "image" if filename.endswith("jpg") else "video"
        return filename, start, end, media_kind
    # Anything else is treated as a direct file link.
    filename, file_type = get_media_link(url)
    return filename, start, end, file_type
def media_source(source_type):
    """Map a source platform to the media kind we fetch from it.

    Unknown platforms fall back to the generic ``"url"`` kind.
    """
    kinds = {
        "overcast": "audio",
        "youtube": "audio",
        "xitter": "video",
        "tiktok": "video",
        "instagram": "video",
    }
    return kinds.get(source_type, "url")
def check_source_type(url):
if str(url).startswith("https://overcast.fm/"):
return "overcast"
elif str(url).replace("http://", "").replace("https://", "").replace(
"www.", "").replace("youtu.be/", "youtube.com?v=")[0:11] == "youtube.com":
return "youtube"
elif str(url).startswith("https://x.com") or str(url).startswith("https://twitter.com"):
return "xitter"
elif str(url).startswith("https://vm.tiktok.com") or str(url).startswith(
"https://www.tiktok.com") or str(url).startswith("https://m.tiktok.com"):
return "tiktok"
elif str(url).startswith("https://www.instagram.com") or str(url).startswith(
"https://instagram.com"):
return "instagram"
else:
return "url"
def get_overcast(input_value, start, end):
filename = os.path.abspath(os.curdir + r'/outputs/originalaudio.mp3')
print("Found overcast.fm Link.. downloading")
start_time = start
end_time = end
download_overcast(input_value, filename)
finaltag = str(input_value).replace("https://overcast.fm/", "").split('/')
if start == 0.0:
if len(finaltag) > 1:
t = time.strptime(finaltag[1], "%H:%M:%S")
seconds = t.tm_hour * 60 * 60 + t.tm_min * 60 + t.tm_sec
start_time = float(seconds)
print("Setting start time automatically to " + str(start_time))
if end > 0.0:
end_time = float(seconds + end)
print("Moving end time automatically to " + str(end_time))
return filename, start_time, end_time
def get_TikTok(input_value, start, end):
filepath = os.path.abspath(os.curdir + r'/outputs/')
try:
filename = download_tik_tok(input_value, filepath)
print(filename)
except Exception as e:
print(e)
return "", start, end
return filename, start, end
def get_Instagram(input_value, start, end):
filepath = os.path.abspath(os.curdir + r'/outputs/')
try:
filename = download_instagram(input_value, filepath)
print(filename)
except Exception as e:
print(e)
return "", start, end
return filename, start, end
def get_Twitter(input_value, start, end):
filepath = os.path.abspath(os.curdir) + r'/outputs/'
cleanlink = str(input_value).replace("twitter.com", "x.com")
try:
filename = download_twitter(cleanlink, filepath)
except Exception as e:
print(e)
return "", start, end
return filename, start, end
def get_youtube(input_value, start, end, audioonly=True):
filepath = os.path.abspath(os.curdir) + r'/outputs/'
print(filepath)
filename = ""
try:
filename = download_youtube(input_value, filepath, audioonly)
except Exception as e:
print("Youtube " + str(e))
return filename, start, end
try:
o = urlparse(input_value)
q = urllib.parse.parse_qs(o.query)
if start == 0.0:
if o.query.find('?t=') != -1:
start = q['t'][0] # overwrite from link.. why not..
print("Setting start time automatically to " + start)
if end > 0.0:
end = float(q['t'][0]) + end
print("Moving end time automatically to " + str(end))
except Exception as e:
print(e)
return filename, start, end
return filename, start, end
def get_media_link(url) -> (str, str):
req = requests.get(url)
content_type = req.headers['content-type']
print(content_type)
if content_type == 'audio/x-wav' or str(url).lower().endswith(".wav"):
ext = "wav"
file_type = "audio"
with open(os.path.abspath(os.curdir + r'/outputs/' + 'file.' + ext), 'wb') as fd:
fd.write(req.content)
return os.path.abspath(os.curdir + r'/outputs/' + 'file.' + ext), file_type
elif content_type == 'audio/mpeg' or str(url).lower().endswith(".mp3"):
ext = "mp3"
file_type = "audio"
with open(os.path.abspath(os.curdir + r'/outputs/' + 'file.' + ext), 'wb') as fd:
fd.write(req.content)
return os.path.abspath(os.curdir + r'/outputs/' + 'file.' + ext), file_type
elif content_type == 'audio/ogg' or str(url).lower().endswith(".ogg"):
ext = "ogg"
file_type = "audio"
with open(os.path.abspath(os.curdir + r'/outputs/' + 'file.' + ext), 'wb') as fd:
fd.write(req.content)
return os.path.abspath(os.curdir + r'/outputs/' + 'file.' + ext), file_type
elif content_type == 'video/mp4' or str(url).lower().endswith(".mp4"):
ext = "mp4"
file_type = "video"
with open(os.path.abspath(os.curdir + r'/outputs/' + 'file.' + ext), 'wb') as fd:
fd.write(req.content)
return os.path.abspath(os.curdir + r'/outputs/' + 'file.' + ext), file_type
elif content_type == 'video/avi' or str(url).lower().endswith(".avi"):
ext = "avi"
file_type = "video"
with open(os.path.abspath(os.curdir + r'/outputs/' + 'file.' + ext), 'wb') as fd:
fd.write(req.content)
return os.path.abspath(os.curdir + r'/outputs/' + 'file.' + ext), file_type
elif content_type == 'video/quicktime' or str(url).lower().endswith(".mov"):
ext = "mov"
file_type = "video"
with open(os.path.abspath(os.curdir + r'/outputs/' + 'file.' + ext), 'wb') as fd:
fd.write(req.content)
return os.path.abspath(os.curdir + r'/outputs/' + 'file.' + ext), file_type
else:
print(str(url).lower())
return None, None
def download_overcast(source_url, target_location):
result = OvercastDownload(source_url, target_location)
return result
def download_twitter(videourl, path):
result = XitterDownload(videourl, path + "x.mp4")
return result
def download_tik_tok(videourl, path):
result = TiktokDownloadAll([videourl], path)
return result
def download_instagram(videourl, path):
result = InstagramDownload(videourl, "insta", path)
return result
def download_youtube(link, path, audioonly=True): | return YouTubeDownload(link, path, audio_only=audioonly) | 5 | 2023-11-17 18:32:56+00:00 | 12k |
embrake/Aquilify | aquilify/wrappers/request.py | [
{
"identifier": "AwaitableOrContextManager",
"path": "aquilify/utils/_utils.py",
"snippet": "class AwaitableOrContextManager(Protocol[T_co]):\n def __await__(self) -> typing.Generator[typing.Any, None, T_co]:\n ... # pragma: no cover\n\n async def __aenter__(self) -> T_co:\n ... # ... | import json
import typing
import anyio
from http import cookies as http_cookies
from urllib.parse import parse_qs
from ..utils._utils import AwaitableOrContextManager, AwaitableOrContextManagerWrapper
from ..datastructure.core import URL, Address, FormData, Headers, State
from ..exception.http_exception import HTTPException
from ..datastructure.formparser import FormParser, MultiPartException, MultiPartParser
from ..datastructure.user_agent import UserAgentParser
from ..types import Message, Receive, Scope, Send
from multipart.multipart import parse_options_header | 8,788 | @property
def auth(self) -> typing.Any:
assert (
"auth" in self.scope
), "AuthenticationMiddleware must be installed to access request.auth"
return self.scope["auth"]
@property
def user(self) -> typing.Any:
assert (
"user" in self.scope
), "AuthenticationMiddleware must be installed to access request.user"
return self.scope["user"]
@property
def state(self) -> State:
if not hasattr(self, "_state"):
self.scope.setdefault("state", {})
self._state = State(self.scope["state"])
return self._state
async def empty_receive() -> typing.NoReturn:
raise RuntimeError("Receive channel has not been made available")
async def empty_send(message: Message) -> typing.NoReturn:
raise RuntimeError("Send channel has not been made available")
class Request(HTTPConnection):
_form: typing.Optional[FormData]
def __init__(
self, scope: Scope, receive: Receive = empty_receive, send: Send = empty_send
):
super().__init__(scope)
assert scope["type"] == "http"
self._receive = receive
self._send = send
self._stream_consumed = False
self._is_disconnected = False
self._form = None
self.path_params = None
self.query_params: typing.Dict[str, str] = {}
self.context: typing.Dict[str, str] = {}
self.executed_middlewares = set()
@property
def method(self) -> str:
return self.scope["method"]
@property
def args(self) -> typing.Dict[str, str]:
self._parse_query_params()
return self.query_params
@property
def receive(self) -> Receive:
return self._receive
async def stream(self) -> typing.AsyncGenerator[bytes, None]:
if hasattr(self, "_body"):
yield self._body
yield b""
return
if self._stream_consumed:
raise RuntimeError("Stream consumed")
self._stream_consumed = True
while True:
message = await self._receive()
if message["type"] == "http.request":
body = message.get("body", b"")
if body:
yield body
if not message.get("more_body", False):
break
elif message["type"] == "http.disconnect":
self._is_disconnected = True
yield b""
async def body(self) -> bytes:
if not hasattr(self, "_body"):
chunks: "typing.List[bytes]" = []
async for chunk in self.stream():
chunks.append(chunk)
self._body = b"".join(chunks)
return self._body
async def json(self) -> typing.Any:
if not hasattr(self, "_json"):
body = await self.body()
self._json = json.loads(body)
return self._json
def _parse_query_params(self):
query_string = self.scope.get('query_string', b'').decode('utf-8')
self.query_params = {k: v[0] for k, v in parse_qs(query_string).items()}
async def _get_form(
self,
*,
max_files: typing.Union[int, float] = 1000,
max_fields: typing.Union[int, float] = 1000,
) -> FormData:
if self._form is None:
assert (
parse_options_header is not None
), "The `python-multipart` library must be installed to use form parsing."
content_type_header = self.headers.get("Content-Type")
content_type: bytes
content_type, _ = parse_options_header(content_type_header)
if content_type == b"multipart/form-data":
try:
multipart_parser = MultiPartParser(
self.headers,
self.stream(),
max_files=max_files,
max_fields=max_fields,
)
self._form = await multipart_parser.parse()
|
try:
except ModuleNotFoundError:
parse_options_header = None
SERVER_PUSH_HEADERS_TO_COPY = {
"accept",
"accept-encoding",
"accept-language",
"cache-control",
"user-agent",
}
def cookie_parser(cookie_string: str) -> typing.Dict[str, str]:
cookie_dict: typing.Dict[str, str] = {}
for chunk in cookie_string.split(";"):
if "=" in chunk:
key, val = chunk.split("=", 1)
else:
key, val = "", chunk
key, val = key.strip(), val.strip()
if key or val:
cookie_dict[key] = http_cookies._unquote(val)
return cookie_dict
class ClientDisconnect(Exception):
pass
class HTTPConnection(typing.Mapping[str, typing.Any]):
def __init__(self, scope: Scope, receive: typing.Optional[Receive] = None) -> None:
assert scope["type"] in ("http", "websocket")
self.scope = scope
def __getitem__(self, key: str) -> typing.Any:
return self.scope[key]
def __iter__(self) -> typing.Iterator[str]:
return iter(self.scope)
def __len__(self) -> int:
return len(self.scope)
__eq__ = object.__eq__
__hash__ = object.__hash__
@property
def app(self) -> typing.Any:
return self.scope["app"]
@property
def url(self) -> URL:
if not hasattr(self, "_url"):
self._url = URL(scope=self.scope)
return self._url
@property
def base_url(self) -> URL:
if not hasattr(self, "_base_url"):
base_url_scope = dict(self.scope)
base_url_scope["path"] = "/"
base_url_scope["query_string"] = b""
base_url_scope["root_path"] = base_url_scope.get(
"app_root_path", base_url_scope.get("root_path", "")
)
self._base_url = URL(scope=base_url_scope)
return self._base_url
@property
def headers(self) -> Headers:
if not hasattr(self, "_headers"):
self._headers = Headers(scope=self.scope)
return self._headers
@property
def origin(self):
return self.headers.get('origin')
@property
def remote_addr(self) -> str:
return self.scope.get('client', ('',))[0]
@property
def scheme(self) -> str:
return self.scope.get('scheme', 'http')
@property
def server(self) -> typing.Dict[str, str]:
return {
'server_protocol': self.scope.get('server_protocol', ''),
'server_name': self.scope.get('server_name', ''),
'server_port': self.scope.get('server_port', ''),
}
@property
def authorization(self) -> typing.Optional[str]:
return self.headers.get('authorization')
@property
def user_agent(self) -> UserAgentParser:
return UserAgentParser(self.headers.get('user-agent', ''))
@property
def referer(self) -> str:
return self.headers.get('referer', '')
@property
def accept(self) -> str:
return self.headers.get('accept', '')
@property
def host(self) -> str:
return self.headers.get('host')
@property
def path(self) -> str:
return self.scope.get('path', '/')
@property
def path_param(self) -> typing.Dict[str, typing.Any]:
return self.scope.get("path_params", {})
@property
def cookies(self) -> typing.Dict[str, str]:
if not hasattr(self, "_cookies"):
cookies: typing.Dict[str, str] = {}
cookie_header = self.headers.get("cookie")
if cookie_header:
cookies = cookie_parser(cookie_header)
self._cookies = cookies
return self._cookies
@property
def client(self) -> typing.Optional[Address]:
# client is a 2 item tuple of (host, port), None or missing
host_port = self.scope.get("client")
if host_port is not None:
return Address(*host_port)
return None
@property
def session(self) -> typing.Dict[str, typing.Any]:
assert (
"session" in self.scope
), "SessionMiddleware must be installed to access request.session"
return self.scope["session"]
@property
def auth(self) -> typing.Any:
assert (
"auth" in self.scope
), "AuthenticationMiddleware must be installed to access request.auth"
return self.scope["auth"]
@property
def user(self) -> typing.Any:
assert (
"user" in self.scope
), "AuthenticationMiddleware must be installed to access request.user"
return self.scope["user"]
@property
def state(self) -> State:
if not hasattr(self, "_state"):
self.scope.setdefault("state", {})
self._state = State(self.scope["state"])
return self._state
async def empty_receive() -> typing.NoReturn:
raise RuntimeError("Receive channel has not been made available")
async def empty_send(message: Message) -> typing.NoReturn:
raise RuntimeError("Send channel has not been made available")
class Request(HTTPConnection):
_form: typing.Optional[FormData]
def __init__(
self, scope: Scope, receive: Receive = empty_receive, send: Send = empty_send
):
super().__init__(scope)
assert scope["type"] == "http"
self._receive = receive
self._send = send
self._stream_consumed = False
self._is_disconnected = False
self._form = None
self.path_params = None
self.query_params: typing.Dict[str, str] = {}
self.context: typing.Dict[str, str] = {}
self.executed_middlewares = set()
@property
def method(self) -> str:
return self.scope["method"]
@property
def args(self) -> typing.Dict[str, str]:
self._parse_query_params()
return self.query_params
@property
def receive(self) -> Receive:
return self._receive
async def stream(self) -> typing.AsyncGenerator[bytes, None]:
if hasattr(self, "_body"):
yield self._body
yield b""
return
if self._stream_consumed:
raise RuntimeError("Stream consumed")
self._stream_consumed = True
while True:
message = await self._receive()
if message["type"] == "http.request":
body = message.get("body", b"")
if body:
yield body
if not message.get("more_body", False):
break
elif message["type"] == "http.disconnect":
self._is_disconnected = True
yield b""
async def body(self) -> bytes:
if not hasattr(self, "_body"):
chunks: "typing.List[bytes]" = []
async for chunk in self.stream():
chunks.append(chunk)
self._body = b"".join(chunks)
return self._body
async def json(self) -> typing.Any:
if not hasattr(self, "_json"):
body = await self.body()
self._json = json.loads(body)
return self._json
def _parse_query_params(self):
query_string = self.scope.get('query_string', b'').decode('utf-8')
self.query_params = {k: v[0] for k, v in parse_qs(query_string).items()}
async def _get_form(
self,
*,
max_files: typing.Union[int, float] = 1000,
max_fields: typing.Union[int, float] = 1000,
) -> FormData:
if self._form is None:
assert (
parse_options_header is not None
), "The `python-multipart` library must be installed to use form parsing."
content_type_header = self.headers.get("Content-Type")
content_type: bytes
content_type, _ = parse_options_header(content_type_header)
if content_type == b"multipart/form-data":
try:
multipart_parser = MultiPartParser(
self.headers,
self.stream(),
max_files=max_files,
max_fields=max_fields,
)
self._form = await multipart_parser.parse() | except MultiPartException as exc: | 9 | 2023-11-16 08:26:02+00:00 | 12k |
IBM/oper8 | tests/test_component.py | [
{
"identifier": "constants",
"path": "oper8/constants.py",
"snippet": "PAUSE_ANNOTATION_NAME = \"oper8.org/pause-execution\"\nCONFIG_DEFAULTS_ANNOTATION_NAME = \"oper8.org/config-defaults\"\nLEASE_NAME_ANNOTATION_NAME = \"oper8.org/lease-name\"\nLEASE_TIME_ANNOTATION_NAME = \"oper8.org/lease-time\"\nLOG... | import os
import tempfile
import pytest
import yaml
import alog
from oper8 import constants
from oper8.component import Component, ManagedObject
from oper8.constants import TEMPORARY_PATCHES_ANNOTATION_NAME
from oper8.deploy_manager.dry_run_deploy_manager import DryRunDeployManager
from oper8.deploy_manager.owner_references import _make_owner_reference
from oper8.patch import STRATEGIC_MERGE_PATCH
from oper8.test_helpers.helpers import (
TEST_NAMESPACE,
DummyNodeComponent,
MockDeployManager,
configure_logging,
library_config,
make_patch,
setup_session,
)
from oper8.utils import merge_configs | 7,332 | """
Test the implementations of the default functions in Component
"""
# Standard
# Third Party
# First Party
# Local
configure_logging()
log = alog.use_channel("TEST")
def get_comp_type(name="dummy"):
"""Paramterization helper to get test both standard and legacy components.
This function also wraps the output class type so that name class attributes
are not polluted.
"""
given_name = name
class Wrapped(DummyNodeComponent):
name = given_name
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
return Wrapped
################################################################################
## Tests #######################################################################
################################################################################
##################
## Construction ##
##################
def test_name_validation():
"""Validate that the name validation passes for a class with a valid
name class attribute
"""
class NamedComponent(Component):
name = "foo"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def build_chart(self, session):
pass
class UnnamedComponent(Component):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def build_chart(self, session):
pass
session = setup_session()
component = NamedComponent(session=session)
assert component.name == NamedComponent.name
with pytest.raises(NotImplementedError):
UnnamedComponent(session=session)
def test_scope():
"""Validate that the scope given to the constructor gets wired into the
component's hierarchy correctly
"""
session = setup_session()
comp = get_comp_type()(session=session)
assert session.get_component(comp.name)
##################
## render_chart ##
##################
def test_managed_objects():
"""Make sure managed_objects matches the objects generated by the
chart
"""
session = setup_session()
comp = get_comp_type()(
session=session,
api_objects=[
("bar", {"kind": "Foo", "metadata": {"name": "bar"}}),
("bat", {"kind": "Baz", "metadata": {"name": "bat"}}),
],
)
comp.render_chart(session)
managed_objs = comp.managed_objects
assert len(managed_objs) == 2
assert managed_objs[0].kind == "Foo"
assert managed_objs[1].kind == "Baz"
def test_apply_patches_ok():
"""Make sure applying patches modifies the managed objects as expected"""
patch_name = "test"
| """
Test the implementations of the default functions in Component
"""
# Standard
# Third Party
# First Party
# Local
configure_logging()
log = alog.use_channel("TEST")
def get_comp_type(name="dummy"):
"""Paramterization helper to get test both standard and legacy components.
This function also wraps the output class type so that name class attributes
are not polluted.
"""
given_name = name
class Wrapped(DummyNodeComponent):
name = given_name
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
return Wrapped
################################################################################
## Tests #######################################################################
################################################################################
##################
## Construction ##
##################
def test_name_validation():
"""Validate that the name validation passes for a class with a valid
name class attribute
"""
class NamedComponent(Component):
name = "foo"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def build_chart(self, session):
pass
class UnnamedComponent(Component):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def build_chart(self, session):
pass
session = setup_session()
component = NamedComponent(session=session)
assert component.name == NamedComponent.name
with pytest.raises(NotImplementedError):
UnnamedComponent(session=session)
def test_scope():
"""Validate that the scope given to the constructor gets wired into the
component's hierarchy correctly
"""
session = setup_session()
comp = get_comp_type()(session=session)
assert session.get_component(comp.name)
##################
## render_chart ##
##################
def test_managed_objects():
"""Make sure managed_objects matches the objects generated by the
chart
"""
session = setup_session()
comp = get_comp_type()(
session=session,
api_objects=[
("bar", {"kind": "Foo", "metadata": {"name": "bar"}}),
("bat", {"kind": "Baz", "metadata": {"name": "bat"}}),
],
)
comp.render_chart(session)
managed_objs = comp.managed_objects
assert len(managed_objs) == 2
assert managed_objs[0].kind == "Foo"
assert managed_objs[1].kind == "Baz"
def test_apply_patches_ok():
"""Make sure applying patches modifies the managed objects as expected"""
patch_name = "test" | patch = make_patch( | 11 | 2023-11-15 16:43:29+00:00 | 12k |
smrfeld/tsmixer-pytorch | main.py | [
{
"identifier": "plot_preds",
"path": "utils/plotting.py",
"snippet": "def plot_preds(preds: List[List[float]], preds_gt: List[List[float]], no_feats_plot: int, fname_save: Optional[str] = None, inputs: Optional[List[List[float]]] = None, show: bool = True):\n \"\"\"Plot predictions\n\n Args:\n ... | from utils import TSMixer, plot_preds, plot_loss, TSMixerConf, TSMixerGridSearch
import argparse
import yaml
import os | 7,320 |
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--command", type=str, required=True, choices=["train", "predict", "loss", "grid-search"], help="Command to run")
parser.add_argument("--conf", type=str, required=False, help="Path to the configuration file")
parser.add_argument("--no-feats-plot", type=int, required=False, default=6, help="Number of features to plot")
parser.add_argument("--show", action="store_true", required=False, help="Show plots")
args = parser.parse_args()
if args.command == "train":
# Load configuration
assert args.conf is not None, "Must provide a configuration file"
with open(args.conf, "r") as f:
conf = TSMixerConf.from_dict(yaml.safe_load(f))
tsmixer = TSMixer(conf)
# Train
tsmixer.train()
elif args.command == "predict":
assert args.conf is not None, "Must provide a configuration file"
with open(args.conf, "r") as f:
conf = TSMixerConf.from_dict(yaml.safe_load(f))
# Load best checkpoint
conf.initialize = TSMixerConf.Initialize.FROM_BEST_CHECKPOINT
tsmixer = TSMixer(conf)
# Predict on validation dataset
data = tsmixer.predict_val_dataset(max_samples=10, save_inputs=False)
# Plot predictions
data_plt = data[0]
assert args.no_feats_plot is not None, "Must provide number of features to plot"
plot_preds(
preds=data_plt.pred,
preds_gt=data_plt.pred_gt,
no_feats_plot=args.no_feats_plot,
show=args.show,
fname_save=os.path.join(conf.image_dir, "preds.png")
)
elif args.command == "loss":
assert args.conf is not None, "Must provide a configuration file"
with open(args.conf, "r") as f:
conf = TSMixerConf.from_dict(yaml.safe_load(f))
train_data = conf.load_training_metadata_or_new()
plot_loss(
train_data=train_data,
show=args.show,
fname_save=os.path.join(conf.image_dir, "loss.png")
)
elif args.command == "grid-search":
# Load configuration
assert args.conf is not None, "Must provide a configuration file"
with open(args.conf, "r") as f:
|
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--command", type=str, required=True, choices=["train", "predict", "loss", "grid-search"], help="Command to run")
parser.add_argument("--conf", type=str, required=False, help="Path to the configuration file")
parser.add_argument("--no-feats-plot", type=int, required=False, default=6, help="Number of features to plot")
parser.add_argument("--show", action="store_true", required=False, help="Show plots")
args = parser.parse_args()
if args.command == "train":
# Load configuration
assert args.conf is not None, "Must provide a configuration file"
with open(args.conf, "r") as f:
conf = TSMixerConf.from_dict(yaml.safe_load(f))
tsmixer = TSMixer(conf)
# Train
tsmixer.train()
elif args.command == "predict":
assert args.conf is not None, "Must provide a configuration file"
with open(args.conf, "r") as f:
conf = TSMixerConf.from_dict(yaml.safe_load(f))
# Load best checkpoint
conf.initialize = TSMixerConf.Initialize.FROM_BEST_CHECKPOINT
tsmixer = TSMixer(conf)
# Predict on validation dataset
data = tsmixer.predict_val_dataset(max_samples=10, save_inputs=False)
# Plot predictions
data_plt = data[0]
assert args.no_feats_plot is not None, "Must provide number of features to plot"
plot_preds(
preds=data_plt.pred,
preds_gt=data_plt.pred_gt,
no_feats_plot=args.no_feats_plot,
show=args.show,
fname_save=os.path.join(conf.image_dir, "preds.png")
)
elif args.command == "loss":
assert args.conf is not None, "Must provide a configuration file"
with open(args.conf, "r") as f:
conf = TSMixerConf.from_dict(yaml.safe_load(f))
train_data = conf.load_training_metadata_or_new()
plot_loss(
train_data=train_data,
show=args.show,
fname_save=os.path.join(conf.image_dir, "loss.png")
)
elif args.command == "grid-search":
# Load configuration
assert args.conf is not None, "Must provide a configuration file"
with open(args.conf, "r") as f: | conf_grid_search = TSMixerGridSearch.from_dict(yaml.safe_load(f)) | 3 | 2023-11-18 19:56:18+00:00 | 12k |
Jisencc/yolov5_dual_weighting | utils/dataloaders.py | [
{
"identifier": "Albumentations",
"path": "utils/augmentations.py",
"snippet": "class Albumentations:\n # YOLOv5 Albumentations class (optional, only used if package is installed)\n def __init__(self, size=640):\n self.transform = None\n prefix = colorstr('albumentations: ')\n ... | import contextlib
import glob
import hashlib
import json
import math
import os
import random
import shutil
import time
import numpy as np
import psutil
import torch
import torch.nn.functional as F
import torchvision
import yaml
import mss
import pafy
from itertools import repeat
from multiprocessing.pool import Pool, ThreadPool
from pathlib import Path
from threading import Thread
from urllib.parse import urlparse
from PIL import ExifTags, Image, ImageOps
from torch.utils.data import DataLoader, Dataset, dataloader, distributed
from tqdm import tqdm
from utils.augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste,
letterbox, mixup, random_perspective)
from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, check_dataset, check_requirements,
check_yaml, clean_str, cv2, is_colab, is_kaggle, segments2boxes, unzip_file, xyn2xy,
xywh2xyxy, xywhn2xyxy, xyxy2xywhn)
from utils.torch_utils import torch_distributed_zero_first | 9,569 | for j, x in enumerate(lb):
c = int(x[0]) # class
f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename
if not f.parent.is_dir():
f.parent.mkdir(parents=True)
b = x[1:] * [w, h, w, h] # box
# b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.2 + 3 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
def autosplit(path=DATASETS_DIR / 'coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False):
""" Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
Usage: from utils.dataloaders import *; autosplit()
Arguments
path: Path to images directory
weights: Train, val, test weights (list, tuple)
annotated_only: Only use images with an annotated txt file
"""
path = Path(path) # images dir
files = sorted(x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS) # image files only
n = len(files) # number of files
random.seed(0) # for reproducibility
indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files
for x in txt:
if (path.parent / x).exists():
(path.parent / x).unlink() # remove existing
print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only)
for i, img in tqdm(zip(indices, files), total=n):
if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label
with open(path.parent / txt[i], 'a') as f:
f.write(f'./{img.relative_to(path.parent).as_posix()}' + '\n') # add image to txt file
def verify_image_label(args):
# Verify one image-label pair
im_file, lb_file, prefix = args
nm, nf, ne, nc, msg, segments = 0, 0, 0, 0, '', [] # number (missing, found, empty, corrupt), message, segments
try:
# verify images
im = Image.open(im_file)
im.verify() # PIL verify
shape = exif_size(im) # image size
assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}'
if im.format.lower() in ('jpg', 'jpeg'):
with open(im_file, 'rb') as f:
f.seek(-2, 2)
if f.read() != b'\xff\xd9': # corrupt JPEG
ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100)
msg = f'{prefix}WARNING ⚠️ {im_file}: corrupt JPEG restored and saved'
# verify labels
if os.path.isfile(lb_file):
nf = 1 # label found
with open(lb_file) as f:
lb = [x.split() for x in f.read().strip().splitlines() if len(x)]
if any(len(x) > 6 for x in lb): # is segment
classes = np.array([x[0] for x in lb], dtype=np.float32)
segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in lb] # (cls, xy1...)
lb = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh)
lb = np.array(lb, dtype=np.float32)
nl = len(lb)
if nl:
assert lb.shape[1] == 5, f'labels require 5 columns, {lb.shape[1]} columns detected'
assert (lb >= 0).all(), f'negative label values {lb[lb < 0]}'
assert (lb[:, 1:] <= 1).all(), f'non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}'
_, i = np.unique(lb, axis=0, return_index=True)
if len(i) < nl: # duplicate row check
lb = lb[i] # remove duplicates
if segments:
segments = [segments[x] for x in i]
msg = f'{prefix}WARNING ⚠️ {im_file}: {nl - len(i)} duplicate labels removed'
else:
ne = 1 # label empty
lb = np.zeros((0, 5), dtype=np.float32)
else:
nm = 1 # label missing
lb = np.zeros((0, 5), dtype=np.float32)
return im_file, lb, shape, segments, nm, nf, ne, nc, msg
except Exception as e:
nc = 1
msg = f'{prefix}WARNING ⚠️ {im_file}: ignoring corrupt image/label: {e}'
return [None, None, None, None, nm, nf, ne, nc, msg]
class HUBDatasetStats():
""" Class for generating HUB dataset JSON and `-hub` dataset directory
Arguments
path: Path to data.yaml or data.zip (with data.yaml inside data.zip)
autodownload: Attempt to download dataset if not found locally
Usage
from utils.dataloaders import HUBDatasetStats
stats = HUBDatasetStats('coco128.yaml', autodownload=True) # usage 1
stats = HUBDatasetStats('path/to/coco128.zip') # usage 2
stats.get_json(save=False)
stats.process_images()
"""
def __init__(self, path='coco128.yaml', autodownload=False):
# Initialize class
zipped, data_dir, yaml_path = self._unzip(Path(path))
try:
with open(check_yaml(yaml_path), errors='ignore') as f:
data = yaml.safe_load(f) # data dict
if zipped:
data['path'] = data_dir
except Exception as e:
raise Exception('error/HUB/dataset_stats/yaml_load') from e
| # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Dataloaders and dataset utils
"""
# Parameters
HELP_URL = 'See https://docs.ultralytics.com/yolov5/tutorials/train_custom_data'
IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm' # include image suffixes
VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes
LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html
RANK = int(os.getenv('RANK', -1))
PIN_MEMORY = str(os.getenv('PIN_MEMORY', True)).lower() == 'true' # global pin_memory for dataloaders
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def get_hash(paths):
    """Return a single SHA-256 hex digest for a list of file/dir paths (total size + joined names)."""
    total_size = sum(os.path.getsize(p) for p in paths if os.path.exists(p))  # missing paths contribute 0
    digest = hashlib.sha256(str(total_size).encode())  # hash sizes first
    digest.update(''.join(paths).encode())  # then the path names themselves
    return digest.hexdigest()
def exif_size(img):
    """Return the (width, height) of a PIL image, corrected for its EXIF Orientation tag."""
    w, h = img.size  # PIL reports (width, height)
    with contextlib.suppress(Exception):  # missing/odd EXIF data is simply ignored
        rotation = dict(img._getexif().items())[orientation]  # `orientation` is the module-level EXIF tag id
        if rotation in [6, 8]:  # rotation 270 or 90 swaps the axes
            w, h = h, w
    return (w, h)
def exif_transpose(image):
    """
    Apply a PIL image's EXIF Orientation tag, returning the upright image.

    In-place variant of PIL.ImageOps.exif_transpose: the Orientation tag is
    deleted and the remaining EXIF bytes are written back into image.info.

    :param image: The image to transpose.
    :return: An image.
    """
    exif = image.getexif()
    tag = exif.get(0x0112, 1)  # 0x0112 == Orientation; default 1 == already upright
    if tag > 1:
        op = {
            2: Image.FLIP_LEFT_RIGHT,
            3: Image.ROTATE_180,
            4: Image.FLIP_TOP_BOTTOM,
            5: Image.TRANSPOSE,
            6: Image.ROTATE_270,
            7: Image.TRANSVERSE,
            8: Image.ROTATE_90}.get(tag)
        if op is not None:
            image = image.transpose(op)
        del exif[0x0112]  # tag no longer applies once the pixels are transposed
        image.info['exif'] = exif.tobytes()
    return image
def seed_worker(worker_id):
    """Seed numpy and random inside a DataLoader worker from torch's base seed (reproducibility).

    See https://pytorch.org/docs/stable/notes/randomness.html#dataloader
    """
    derived_seed = torch.initial_seed() % 2 ** 32  # numpy seeds must fit in 32 bits
    np.random.seed(derived_seed)
    random.seed(derived_seed)
def create_dataloader(path,
                      imgsz,
                      batch_size,
                      stride,
                      single_cls=False,
                      hyp=None,
                      augment=False,
                      cache=False,
                      pad=0.0,
                      rect=False,
                      rank=-1,
                      workers=8,
                      image_weights=False,
                      quad=False,
                      prefix='',
                      shuffle=False,
                      seed=0):
    """Build a LoadImagesAndLabels dataset for `path` and wrap it in a train/val DataLoader.

    Returns a (loader, dataset) tuple. The loader is an InfiniteDataLoader (worker-reusing)
    unless image_weights is set, which requires a vanilla DataLoader for attribute updates.
    Under DDP (rank != -1) a DistributedSampler takes over shuffling.
    """
    if rect and shuffle:
        LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False')
        shuffle = False
    with torch_distributed_zero_first(rank):  # init dataset *.cache only once if DDP
        dataset = LoadImagesAndLabels(
            path,
            imgsz,
            batch_size,
            augment=augment,  # augmentation
            hyp=hyp,  # hyperparameters
            rect=rect,  # rectangular batches
            cache_images=cache,
            single_cls=single_cls,
            stride=int(stride),
            pad=pad,
            image_weights=image_weights,
            prefix=prefix)
    batch_size = min(batch_size, len(dataset))  # don't request more samples per batch than exist
    nd = torch.cuda.device_count()  # number of CUDA devices
    nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers])  # number of workers
    sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle)
    loader = DataLoader if image_weights else InfiniteDataLoader  # only DataLoader allows for attribute updates
    generator = torch.Generator()
    # fixed base constant + user seed + process RANK => deterministic but distinct per DDP process
    generator.manual_seed(6148914691236517205 + seed + RANK)
    return loader(dataset,
                  batch_size=batch_size,
                  shuffle=shuffle and sampler is None,  # the sampler handles shuffling when present
                  num_workers=nw,
                  sampler=sampler,
                  pin_memory=PIN_MEMORY,
                  collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn,
                  worker_init_fn=seed_worker,
                  generator=generator), dataset
class InfiniteDataLoader(dataloader.DataLoader):
    """Dataloader that reuses its workers across epochs by repeating the batch sampler forever.

    Uses the same constructor syntax as a vanilla DataLoader.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # batch_sampler is effectively read-only on DataLoader, so bypass __setattr__ to
        # swap in the endlessly-repeating sampler after construction
        object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
        self.iterator = super().__iter__()  # one persistent iterator keeps workers alive between epochs

    def __len__(self):
        # One epoch = one pass over the underlying (non-repeating) sampler
        return len(self.batch_sampler.sampler)

    def __iter__(self):
        # Yield exactly one epoch's worth of batches from the persistent iterator
        for _ in range(len(self)):
            yield next(self.iterator)
class _RepeatSampler:
""" Sampler that repeats forever
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
class LoadScreenshots:
    # YOLOv5 screenshot dataloader, i.e. `python detect.py --source "screen 0 100 100 512 256"`

    def __init__(self, source, img_size=640, stride=32, auto=True, transforms=None):
        """Parse a `screen [number] [left top width height]` source string and open an mss capture session."""
        # source = [screen_number left top width height] (pixels)
        check_requirements('mss')
        import mss
        source, *params = source.split()
        self.screen, left, top, width, height = 0, None, None, None, None  # default to full screen 0
        if len(params) == 1:
            self.screen = int(params[0])
        elif len(params) == 4:
            left, top, width, height = (int(x) for x in params)
        elif len(params) == 5:
            self.screen, left, top, width, height = (int(x) for x in params)
        self.img_size = img_size
        self.stride = stride
        self.transforms = transforms
        self.auto = auto
        self.mode = 'stream'
        self.frame = 0
        self.sct = mss.mss()

        # Parse monitor shape; explicit offsets are relative to the selected monitor's origin
        monitor = self.sct.monitors[self.screen]
        self.top = monitor['top'] if top is None else (monitor['top'] + top)
        self.left = monitor['left'] if left is None else (monitor['left'] + left)
        self.width = width or monitor['width']
        self.height = height or monitor['height']
        self.monitor = {'left': self.left, 'top': self.top, 'width': self.width, 'height': self.height}

    def __iter__(self):
        """Iterate forever; each step grabs a fresh screenshot."""
        return self

    def __next__(self):
        """Capture one frame; returns (screen id str, preprocessed im, raw im0, None, log string)."""
        # mss screen capture: get raw pixels from the screen as np array
        im0 = np.array(self.sct.grab(self.monitor))[:, :, :3]  # [:, :, :3] BGRA to BGR
        s = f'screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: '

        if self.transforms:
            im = self.transforms(im0)  # transforms
        else:
            im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0]  # padded resize
            im = im.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
            im = np.ascontiguousarray(im)  # contiguous
        self.frame += 1
        return str(self.screen), im, im0, None, s  # screen, img, original img, im0s, s
class LoadImages:
    # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`

    def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):
        """Collect image/video files from a path, glob, dir, file, or *.txt list of sources."""
        if isinstance(path, str) and Path(path).suffix == '.txt':  # *.txt file with img/vid/dir on each line
            path = Path(path).read_text().rsplit()
        files = []
        for p in sorted(path) if isinstance(path, (list, tuple)) else [path]:
            p = str(Path(p).resolve())
            if '*' in p:
                files.extend(sorted(glob.glob(p, recursive=True)))  # glob
            elif os.path.isdir(p):
                files.extend(sorted(glob.glob(os.path.join(p, '*.*'))))  # dir
            elif os.path.isfile(p):
                files.append(p)  # files
            else:
                raise FileNotFoundError(f'{p} does not exist')

        # Split collected files into images and videos by suffix
        images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]
        videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]
        ni, nv = len(images), len(videos)

        self.img_size = img_size
        self.stride = stride
        self.files = images + videos
        self.nf = ni + nv  # number of files
        self.video_flag = [False] * ni + [True] * nv  # parallel to self.files: which entries are videos
        self.mode = 'image'
        self.auto = auto
        self.transforms = transforms  # optional
        self.vid_stride = vid_stride  # video frame-rate stride
        if any(videos):
            self._new_video(videos[0])  # new video
        else:
            self.cap = None
        assert self.nf > 0, f'No images or videos found in {p}. ' \
                            f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}'

    def __iter__(self):
        """Reset the file cursor and return self."""
        self.count = 0
        return self

    def __next__(self):
        """Return the next (path, preprocessed im, raw im0, video capture, log string) tuple."""
        if self.count == self.nf:
            raise StopIteration
        path = self.files[self.count]

        if self.video_flag[self.count]:
            # Read video, skipping vid_stride-1 frames between reads
            self.mode = 'video'
            for _ in range(self.vid_stride):
                self.cap.grab()
            ret_val, im0 = self.cap.retrieve()
            while not ret_val:  # current video exhausted: advance to the next file
                self.count += 1
                self.cap.release()
                if self.count == self.nf:  # last video
                    raise StopIteration
                path = self.files[self.count]
                self._new_video(path)
                ret_val, im0 = self.cap.read()

            self.frame += 1
            # im0 = self._cv2_rotate(im0)  # for use if cv2 autorotation is False
            s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: '

        else:
            # Read image
            self.count += 1
            im0 = cv2.imread(path)  # BGR
            assert im0 is not None, f'Image Not Found {path}'
            s = f'image {self.count}/{self.nf} {path}: '

        if self.transforms:
            im = self.transforms(im0)  # transforms
        else:
            im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0]  # padded resize
            im = im.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
            im = np.ascontiguousarray(im)  # contiguous

        return path, im, im0, self.cap, s

    def _new_video(self, path):
        """Open `path` as a cv2.VideoCapture and reset the per-video frame counter."""
        # Create a new video capture object
        self.frame = 0
        self.cap = cv2.VideoCapture(path)
        self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride)
        self.orientation = int(self.cap.get(cv2.CAP_PROP_ORIENTATION_META))  # rotation degrees
        # self.cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0)  # disable https://github.com/ultralytics/yolov5/issues/8493

    def _cv2_rotate(self, im):
        """Rotate a cv2 frame manually according to the video's orientation metadata."""
        # Rotate a cv2 video manually
        if self.orientation == 0:
            return cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE)
        elif self.orientation == 180:
            return cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE)
        elif self.orientation == 90:
            return cv2.rotate(im, cv2.ROTATE_180)
        return im

    def __len__(self):
        return self.nf  # number of files
class LoadStreams:
    # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP streams`

    def __init__(self, sources='file.streams', img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):
        """Open one or more video streams and start a daemon reader thread per stream."""
        torch.backends.cudnn.benchmark = True  # faster for fixed-size inference
        self.mode = 'stream'
        self.img_size = img_size
        self.stride = stride
        self.vid_stride = vid_stride  # video frame-rate stride
        sources = Path(sources).read_text().rsplit() if os.path.isfile(sources) else [sources]
        n = len(sources)
        self.sources = [clean_str(x) for x in sources]  # clean source names for later
        self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n
        for i, s in enumerate(sources):  # index, source
            # Start thread to read frames from video stream
            st = f'{i + 1}/{n}: {s}... '
            if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'):  # if source is YouTube video
                # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/LNwODJXcvt4'
                check_requirements(('pafy', 'youtube_dl==2020.12.2'))
                s = pafy.new(s).getbest(preftype='mp4').url  # YouTube URL
            s = eval(s) if s.isnumeric() else s  # i.e. s = '0' local webcam
            if s == 0:
                assert not is_colab(), '--source 0 webcam unsupported on Colab. Rerun command in a local environment.'
                assert not is_kaggle(), '--source 0 webcam unsupported on Kaggle. Rerun command in a local environment.'
            cap = cv2.VideoCapture(s)
            assert cap.isOpened(), f'{st}Failed to open {s}'
            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fps = cap.get(cv2.CAP_PROP_FPS)  # warning: may return 0 or nan
            self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf')  # infinite stream fallback
            self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30  # 30 FPS fallback
            _, self.imgs[i] = cap.read()  # guarantee first frame
            self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True)
            LOGGER.info(f'{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)')
            self.threads[i].start()
        LOGGER.info('')  # newline

        # check for common shapes; rect inference is only valid when all streams letterbox identically
        s = np.stack([letterbox(x, img_size, stride=stride, auto=auto)[0].shape for x in self.imgs])
        self.rect = np.unique(s, axis=0).shape[0] == 1  # rect inference if all shapes equal
        self.auto = auto and self.rect
        self.transforms = transforms  # optional
        if not self.rect:
            LOGGER.warning('WARNING ⚠️ Stream shapes differ. For optimal performance supply similarly-shaped streams.')

    def update(self, i, cap, stream):
        """Daemon thread body: keep self.imgs[i] refreshed with the latest frame from stream i."""
        # Read stream `i` frames in daemon thread
        n, f = 0, self.frames[i]  # frame number, frame array
        while cap.isOpened() and n < f:
            n += 1
            cap.grab()  # .read() = .grab() followed by .retrieve()
            if n % self.vid_stride == 0:
                success, im = cap.retrieve()
                if success:
                    self.imgs[i] = im
                else:
                    LOGGER.warning('WARNING ⚠️ Video stream unresponsive, please check your IP camera connection.')
                    self.imgs[i] = np.zeros_like(self.imgs[i])  # blank frame rather than a stale one
                    cap.open(stream)  # re-open stream if signal was lost
            time.sleep(0.0)  # wait time

    def __iter__(self):
        """Reset the iteration counter and return self."""
        self.count = -1
        return self

    def __next__(self):
        """Return the latest batch of frames; stops when any reader thread dies or 'q' is pressed."""
        self.count += 1
        if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'):  # q to quit
            cv2.destroyAllWindows()
            raise StopIteration

        im0 = self.imgs.copy()
        if self.transforms:
            im = np.stack([self.transforms(x) for x in im0])  # transforms
        else:
            im = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0] for x in im0])  # resize
            im = im[..., ::-1].transpose((0, 3, 1, 2))  # BGR to RGB, BHWC to BCHW
            im = np.ascontiguousarray(im)  # contiguous

        return self.sources, im, im0, None, ''

    def __len__(self):
        return len(self.sources)  # 1E12 frames = 32 streams at 30 FPS for 30 years
def img2label_paths(img_paths):
    """Derive label (*.txt) paths from image paths by swapping the last /images/ dir for /labels/."""
    img_dir = f'{os.sep}images{os.sep}'
    lbl_dir = f'{os.sep}labels{os.sep}'
    label_paths = []
    for img_path in img_paths:
        swapped = lbl_dir.join(img_path.rsplit(img_dir, 1))  # only the last /images/ is replaced
        label_paths.append(swapped.rsplit('.', 1)[0] + '.txt')  # swap the file extension for .txt
    return label_paths
class LoadImagesAndLabels(Dataset):
    # YOLOv5 train_loader/val_loader, loads images and labels for training and validation
    cache_version = 0.6  # dataset labels *.cache version
    rand_interp_methods = [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4]

    def __init__(self,
                 path,
                 img_size=640,
                 batch_size=16,
                 augment=False,
                 hyp=None,
                 rect=False,
                 image_weights=False,
                 cache_images=False,
                 single_cls=False,
                 stride=32,
                 pad=0.0,
                 min_items=0,
                 prefix=''):
        """Index the dataset at `path`, build or load the label cache, and prepare rect/mosaic/caching state."""
        self.img_size = img_size
        self.augment = augment
        self.hyp = hyp
        self.image_weights = image_weights
        self.rect = False if image_weights else rect  # image-weighted sampling is incompatible with rect batches
        self.mosaic = self.augment and not self.rect  # load 4 images at a time into a mosaic (only during training)
        self.mosaic_border = [-img_size // 2, -img_size // 2]
        self.stride = stride
        self.path = path
        self.albumentations = Albumentations(size=img_size) if augment else None

        try:
            f = []  # image files
            for p in path if isinstance(path, list) else [path]:
                p = Path(p)  # os-agnostic
                if p.is_dir():  # dir
                    f += glob.glob(str(p / '**' / '*.*'), recursive=True)
                    # f = list(p.rglob('*.*'))  # pathlib
                elif p.is_file():  # file
                    with open(p) as t:
                        t = t.read().strip().splitlines()
                        parent = str(p.parent) + os.sep
                        f += [x.replace('./', parent, 1) if x.startswith('./') else x for x in t]  # to global path
                        # f += [p.parent / x.lstrip(os.sep) for x in t]  # to global path (pathlib)
                else:
                    raise FileNotFoundError(f'{prefix}{p} does not exist')
            self.im_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS)
            # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS])  # pathlib
            assert self.im_files, f'{prefix}No images found'
        except Exception as e:
            raise Exception(f'{prefix}Error loading data from {path}: {e}\n{HELP_URL}') from e

        # Check cache: reuse a *.cache file only if its version and content hash still match
        self.label_files = img2label_paths(self.im_files)  # labels
        cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache')
        try:
            cache, exists = np.load(cache_path, allow_pickle=True).item(), True  # load dict
            assert cache['version'] == self.cache_version  # matches current version
            assert cache['hash'] == get_hash(self.label_files + self.im_files)  # identical hash
        except Exception:
            cache, exists = self.cache_labels(cache_path, prefix), False  # run cache ops

        # Display cache
        nf, nm, ne, nc, n = cache.pop('results')  # found, missing, empty, corrupt, total
        if exists and LOCAL_RANK in {-1, 0}:
            d = f'Scanning {cache_path}... {nf} images, {nm + ne} backgrounds, {nc} corrupt'
            tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=TQDM_BAR_FORMAT)  # display cache results
            if cache['msgs']:
                LOGGER.info('\n'.join(cache['msgs']))  # display warnings
        assert nf > 0 or not augment, f'{prefix}No labels found in {cache_path}, can not start training. {HELP_URL}'

        # Read cache
        [cache.pop(k) for k in ('hash', 'version', 'msgs')]  # remove items
        labels, shapes, self.segments = zip(*cache.values())
        nl = len(np.concatenate(labels, 0))  # number of labels
        assert nl > 0 or not augment, f'{prefix}All labels empty in {cache_path}, can not start training. {HELP_URL}'
        self.labels = list(labels)
        self.shapes = np.array(shapes)
        self.im_files = list(cache.keys())  # update
        self.label_files = img2label_paths(cache.keys())  # update

        # Filter images: optionally drop images with fewer than min_items labels
        if min_items:
            include = np.array([len(x) >= min_items for x in self.labels]).nonzero()[0].astype(int)
            LOGGER.info(f'{prefix}{n - len(include)}/{n} images filtered from dataset')
            self.im_files = [self.im_files[i] for i in include]
            self.label_files = [self.label_files[i] for i in include]
            self.labels = [self.labels[i] for i in include]
            self.segments = [self.segments[i] for i in include]
            self.shapes = self.shapes[include]  # wh

        # Create indices
        n = len(self.shapes)  # number of images
        bi = np.floor(np.arange(n) / batch_size).astype(int)  # batch index
        nb = bi[-1] + 1  # number of batches
        self.batch = bi  # batch index of image
        self.n = n
        self.indices = range(n)

        # Update labels
        include_class = []  # filter labels to include only these classes (optional)
        self.segments = list(self.segments)
        include_class_array = np.array(include_class).reshape(1, -1)
        for i, (label, segment) in enumerate(zip(self.labels, self.segments)):
            if include_class:
                j = (label[:, 0:1] == include_class_array).any(1)
                self.labels[i] = label[j]
                if segment:
                    self.segments[i] = [segment[idx] for idx, elem in enumerate(j) if elem]
            if single_cls:  # single-class training, merge all classes into 0
                self.labels[i][:, 0] = 0

        # Rectangular Training
        if self.rect:
            # Sort by aspect ratio so each batch contains similarly-shaped images
            s = self.shapes  # wh
            ar = s[:, 1] / s[:, 0]  # aspect ratio
            irect = ar.argsort()
            self.im_files = [self.im_files[i] for i in irect]
            self.label_files = [self.label_files[i] for i in irect]
            self.labels = [self.labels[i] for i in irect]
            self.segments = [self.segments[i] for i in irect]
            self.shapes = s[irect]  # wh
            ar = ar[irect]

            # Set training image shapes: per-batch letterbox target from the batch's extreme aspect ratio
            shapes = [[1, 1]] * nb
            for i in range(nb):
                ari = ar[bi == i]
                mini, maxi = ari.min(), ari.max()
                if maxi < 1:
                    shapes[i] = [maxi, 1]
                elif mini > 1:
                    shapes[i] = [1, 1 / mini]

            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride

        # Cache images into RAM/disk for faster training
        if cache_images == 'ram' and not self.check_cache_ram(prefix=prefix):
            cache_images = False
        self.ims = [None] * n
        self.npy_files = [Path(f).with_suffix('.npy') for f in self.im_files]
        if cache_images:
            b, gb = 0, 1 << 30  # bytes of cached images, bytes per gigabytes
            self.im_hw0, self.im_hw = [None] * n, [None] * n
            fcn = self.cache_images_to_disk if cache_images == 'disk' else self.load_image
            results = ThreadPool(NUM_THREADS).imap(fcn, range(n))
            pbar = tqdm(enumerate(results), total=n, bar_format=TQDM_BAR_FORMAT, disable=LOCAL_RANK > 0)
            for i, x in pbar:
                if cache_images == 'disk':
                    b += self.npy_files[i].stat().st_size
                else:  # 'ram'
                    self.ims[i], self.im_hw0[i], self.im_hw[i] = x  # im, hw_orig, hw_resized = load_image(self, i)
                    b += self.ims[i].nbytes
                pbar.desc = f'{prefix}Caching images ({b / gb:.1f}GB {cache_images})'
            pbar.close()

    def check_cache_ram(self, safety_margin=0.1, prefix=''):
        """Estimate whether the whole dataset fits in available RAM (extrapolated from up to 30 sample images)."""
        # Check image caching requirements vs available memory
        b, gb = 0, 1 << 30  # bytes of cached images, bytes per gigabytes
        n = min(self.n, 30)  # extrapolate from 30 random images
        for _ in range(n):
            im = cv2.imread(random.choice(self.im_files))  # sample image
            ratio = self.img_size / max(im.shape[0], im.shape[1])  # max(h, w)  # ratio
            b += im.nbytes * ratio ** 2
        mem_required = b * self.n / n  # GB required to cache dataset into RAM
        mem = psutil.virtual_memory()
        cache = mem_required * (1 + safety_margin) < mem.available  # to cache or not to cache, that is the question
        if not cache:
            LOGGER.info(f'{prefix}{mem_required / gb:.1f}GB RAM required, '
                        f'{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, '
                        f"{'caching images ✅' if cache else 'not caching images ⚠️'}")
        return cache

    def cache_labels(self, path=Path('./labels.cache'), prefix=''):
        """Verify every image/label pair in parallel and save the results dict to a *.cache file."""
        # Cache dataset labels, check images and read shapes
        x = {}  # dict
        nm, nf, ne, nc, msgs = 0, 0, 0, 0, []  # number missing, found, empty, corrupt, messages
        desc = f'{prefix}Scanning {path.parent / path.stem}...'
        with Pool(NUM_THREADS) as pool:
            pbar = tqdm(pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))),
                        desc=desc,
                        total=len(self.im_files),
                        bar_format=TQDM_BAR_FORMAT)
            for im_file, lb, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar:
                nm += nm_f
                nf += nf_f
                ne += ne_f
                nc += nc_f
                if im_file:
                    x[im_file] = [lb, shape, segments]
                if msg:
                    msgs.append(msg)
                pbar.desc = f'{desc} {nf} images, {nm + ne} backgrounds, {nc} corrupt'
        pbar.close()
        if msgs:
            LOGGER.info('\n'.join(msgs))
        if nf == 0:
            LOGGER.warning(f'{prefix}WARNING ⚠️ No labels found in {path}. {HELP_URL}')
        x['hash'] = get_hash(self.label_files + self.im_files)
        x['results'] = nf, nm, ne, nc, len(self.im_files)
        x['msgs'] = msgs  # warnings
        x['version'] = self.cache_version  # cache version
        try:
            np.save(path, x)  # save cache for next time
            path.with_suffix('.cache.npy').rename(path)  # remove .npy suffix
            LOGGER.info(f'{prefix}New cache created: {path}')
        except Exception as e:
            LOGGER.warning(f'{prefix}WARNING ⚠️ Cache directory {path.parent} is not writeable: {e}')  # not writeable
        return x

    def __len__(self):
        return len(self.im_files)

    # def __iter__(self):
    #     self.count = -1
    #     print('ran dataset iter')
    #     #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
    #     return self

    def __getitem__(self, index):
        """Return one training sample: (image CHW RGB tensor, labels (n,6), file path, shapes for mAP rescale)."""
        index = self.indices[index]  # linear, shuffled, or image_weights

        hyp = self.hyp
        mosaic = self.mosaic and random.random() < hyp['mosaic']
        if mosaic:
            # Load mosaic
            img, labels = self.load_mosaic(index)
            shapes = None

            # MixUp augmentation
            if random.random() < hyp['mixup']:
                img, labels = mixup(img, labels, *self.load_mosaic(random.randint(0, self.n - 1)))

        else:
            # Load image
            img, (h0, w0), (h, w) = self.load_image(index)

            # Letterbox
            shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size  # final letterboxed shape
            img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
            shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling

            labels = self.labels[index].copy()
            if labels.size:  # normalized xywh to pixel xyxy format
                labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])

            if self.augment:
                img, labels = random_perspective(img,
                                                 labels,
                                                 degrees=hyp['degrees'],
                                                 translate=hyp['translate'],
                                                 scale=hyp['scale'],
                                                 shear=hyp['shear'],
                                                 perspective=hyp['perspective'])

        nl = len(labels)  # number of labels
        if nl:
            labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3)

        if self.augment:
            # Albumentations
            img, labels = self.albumentations(img, labels)
            nl = len(labels)  # update after albumentations

            # HSV color-space
            augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])

            # Flip up-down
            if random.random() < hyp['flipud']:
                img = np.flipud(img)
                if nl:
                    labels[:, 2] = 1 - labels[:, 2]  # y is normalized, so flipping is 1 - y

            # Flip left-right
            if random.random() < hyp['fliplr']:
                img = np.fliplr(img)
                if nl:
                    labels[:, 1] = 1 - labels[:, 1]

            # Cutouts
            # labels = cutout(img, labels, p=0.5)
            # nl = len(labels)  # update after cutout

        labels_out = torch.zeros((nl, 6))  # column 0 is reserved for the batch image index (set in collate_fn)
        if nl:
            labels_out[:, 1:] = torch.from_numpy(labels)

        # Convert
        img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
        img = np.ascontiguousarray(img)

        return torch.from_numpy(img), labels_out, self.im_files[index], shapes

    def load_image(self, i):
        """Loads 1 image from dataset index 'i', returns (im, original hw, resized hw)."""
        im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i],
        if im is None:  # not cached in RAM
            if fn.exists():  # load npy
                im = np.load(fn)
            else:  # read image
                im = cv2.imread(f)  # BGR
                assert im is not None, f'Image Not Found {f}'
            h0, w0 = im.shape[:2]  # orig hw
            r = self.img_size / max(h0, w0)  # ratio
            if r != 1:  # if sizes are not equal
                interp = cv2.INTER_LINEAR if (self.augment or r > 1) else cv2.INTER_AREA
                im = cv2.resize(im, (math.ceil(w0 * r), math.ceil(h0 * r)), interpolation=interp)
            return im, (h0, w0), im.shape[:2]  # im, hw_original, hw_resized
        return self.ims[i], self.im_hw0[i], self.im_hw[i]  # im, hw_original, hw_resized

    def cache_images_to_disk(self, i):
        """Saves an image as an *.npy file for faster loading."""
        f = self.npy_files[i]
        if not f.exists():
            np.save(f.as_posix(), cv2.imread(self.im_files[i]))

    def load_mosaic(self, index):
        """YOLOv5 4-mosaic loader: loads 1 image + 3 random images into a 4-image mosaic with merged labels."""
        labels4, segments4 = [], []
        s = self.img_size
        yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border)  # mosaic center x, y
        indices = [index] + random.choices(self.indices, k=3)  # 3 additional image indices
        random.shuffle(indices)
        for i, index in enumerate(indices):
            # Load image
            img, _, (h, w) = self.load_image(index)

            # place img in img4
            if i == 0:  # top left
                img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
                x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)
                x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)
            elif i == 1:  # top right
                x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
                x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
            elif i == 2:  # bottom left
                x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
                x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
            elif i == 3:  # bottom right
                x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
                x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)

            img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
            padw = x1a - x1b
            padh = y1a - y1b

            # Labels
            labels, segments = self.labels[index].copy(), self.segments[index].copy()
            if labels.size:
                labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh)  # normalized xywh to pixel xyxy format
                segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
            labels4.append(labels)
            segments4.extend(segments)

        # Concat/clip labels
        labels4 = np.concatenate(labels4, 0)
        for x in (labels4[:, 1:], *segments4):
            np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()
        # img4, labels4 = replicate(img4, labels4)  # replicate

        # Augment
        img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste'])
        img4, labels4 = random_perspective(img4,
                                           labels4,
                                           segments4,
                                           degrees=self.hyp['degrees'],
                                           translate=self.hyp['translate'],
                                           scale=self.hyp['scale'],
                                           shear=self.hyp['shear'],
                                           perspective=self.hyp['perspective'],
                                           border=self.mosaic_border)  # border to remove

        return img4, labels4

    def load_mosaic9(self, index):
        """YOLOv5 9-mosaic loader: loads 1 image + 8 random images into a 9-image mosaic with merged labels."""
        labels9, segments9 = [], []
        s = self.img_size
        indices = [index] + random.choices(self.indices, k=8)  # 8 additional image indices
        random.shuffle(indices)
        hp, wp = -1, -1  # height, width previous
        for i, index in enumerate(indices):
            # Load image
            img, _, (h, w) = self.load_image(index)

            # place img in img9
            if i == 0:  # center
                img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
                h0, w0 = h, w
                c = s, s, s + w, s + h  # xmin, ymin, xmax, ymax (base) coordinates
            elif i == 1:  # top
                c = s, s - h, s + w, s
            elif i == 2:  # top right
                c = s + wp, s - h, s + wp + w, s
            elif i == 3:  # right
                c = s + w0, s, s + w0 + w, s + h
            elif i == 4:  # bottom right
                c = s + w0, s + hp, s + w0 + w, s + hp + h
            elif i == 5:  # bottom
                c = s + w0 - w, s + h0, s + w0, s + h0 + h
            elif i == 6:  # bottom left
                c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
            elif i == 7:  # left
                c = s - w, s + h0 - h, s, s + h0
            elif i == 8:  # top left
                c = s - w, s + h0 - hp - h, s, s + h0 - hp

            padx, pady = c[:2]
            x1, y1, x2, y2 = (max(x, 0) for x in c)  # allocate coords

            # Labels
            labels, segments = self.labels[index].copy(), self.segments[index].copy()
            if labels.size:
                labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady)  # normalized xywh to pixel xyxy format
                segments = [xyn2xy(x, w, h, padx, pady) for x in segments]
            labels9.append(labels)
            segments9.extend(segments)

            # Image
            img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:]  # img9[ymin:ymax, xmin:xmax]
            hp, wp = h, w  # height, width previous

        # Offset
        yc, xc = (int(random.uniform(0, s)) for _ in self.mosaic_border)  # mosaic center x, y
        img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]

        # Concat/clip labels
        labels9 = np.concatenate(labels9, 0)
        labels9[:, [1, 3]] -= xc
        labels9[:, [2, 4]] -= yc
        c = np.array([xc, yc])  # centers
        segments9 = [x - c for x in segments9]

        for x in (labels9[:, 1:], *segments9):
            np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()
        # img9, labels9 = replicate(img9, labels9)  # replicate

        # Augment
        img9, labels9, segments9 = copy_paste(img9, labels9, segments9, p=self.hyp['copy_paste'])
        img9, labels9 = random_perspective(img9,
                                           labels9,
                                           segments9,
                                           degrees=self.hyp['degrees'],
                                           translate=self.hyp['translate'],
                                           scale=self.hyp['scale'],
                                           shear=self.hyp['shear'],
                                           perspective=self.hyp['perspective'],
                                           border=self.mosaic_border)  # border to remove

        return img9, labels9

    @staticmethod
    def collate_fn(batch):
        """Stack per-sample outputs into a batch; writes the batch image index into label column 0."""
        im, label, path, shapes = zip(*batch)  # transposed
        for i, lb in enumerate(label):
            lb[:, 0] = i  # add target image index for build_targets()
        return torch.stack(im, 0), torch.cat(label, 0), path, shapes

    @staticmethod
    def collate_fn4(batch):
        """Quad collate: merge each group of 4 samples into 1 (either a 2x upscale of one image or a 2x2 tile)."""
        im, label, path, shapes = zip(*batch)  # transposed
        n = len(shapes) // 4
        im4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]

        # label-space offsets for placing the 4 tiles, and the 0.5 scale applied to tiled labels
        ho = torch.tensor([[0.0, 0, 0, 1, 0, 0]])
        wo = torch.tensor([[0.0, 0, 1, 0, 0, 0]])
        s = torch.tensor([[1, 1, 0.5, 0.5, 0.5, 0.5]])  # scale
        for i in range(n):  # zidane torch.zeros(16,3,720,1280)  # BCHW
            i *= 4
            if random.random() < 0.5:
                im1 = F.interpolate(im[i].unsqueeze(0).float(), scale_factor=2.0, mode='bilinear',
                                    align_corners=False)[0].type(im[i].type())
                lb = label[i]
            else:
                im1 = torch.cat((torch.cat((im[i], im[i + 1]), 1), torch.cat((im[i + 2], im[i + 3]), 1)), 2)
                lb = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
            im4.append(im1)
            label4.append(lb)

        for i, lb in enumerate(label4):
            lb[:, 0] = i  # add target image index for build_targets()

        return torch.stack(im4, 0), torch.cat(label4, 0), path4, shapes4
# Ancillary functions --------------------------------------------------------------------------------------------------
def flatten_recursive(path=DATASETS_DIR / 'coco128'):
    """Copy every file under `path` into a sibling '<path>_flat' directory, flattening the hierarchy."""
    flat_dir = Path(f'{str(path)}_flat')
    if os.path.exists(flat_dir):
        shutil.rmtree(flat_dir)  # start from a clean output folder
    os.makedirs(flat_dir)  # make new output folder
    for src in tqdm(glob.glob(f'{str(Path(path))}/**/*.*', recursive=True)):
        shutil.copyfile(src, flat_dir / Path(src).name)
def extract_boxes(path=DATASETS_DIR / 'coco128'):  # from utils.dataloaders import *; extract_boxes()
    """Convert a detection dataset into a classification dataset: crop each labeled box into per-class dirs."""
    # Convert detection dataset into classification dataset, with one directory per class
    path = Path(path)  # images dir
    # NOTE(review): removes 'classification' but writes crops under 'classifier' below —
    # directory names look inconsistent; verify intent before relying on the cleanup
    shutil.rmtree(path / 'classification') if (path / 'classification').is_dir() else None  # remove existing
    files = list(path.rglob('*.*'))
    n = len(files)  # number of files
    for im_file in tqdm(files, total=n):
        if im_file.suffix[1:] in IMG_FORMATS:
            # image
            im = cv2.imread(str(im_file))[..., ::-1]  # BGR to RGB
            h, w = im.shape[:2]

            # labels
            lb_file = Path(img2label_paths([str(im_file)])[0])
            if Path(lb_file).exists():
                with open(lb_file) as f:
                    lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32)  # labels

                for j, x in enumerate(lb):
                    c = int(x[0])  # class
                    f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg'  # new filename
                    if not f.parent.is_dir():
                        f.parent.mkdir(parents=True)

                    b = x[1:] * [w, h, w, h]  # box
                    # b[2:] = b[2:].max()  # rectangle to square
                    b[2:] = b[2:] * 1.2 + 3  # pad
                    b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)

                    b[[0, 2]] = np.clip(b[[0, 2]], 0, w)  # clip boxes outside of image
                    b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
                    assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
def autosplit(path=DATASETS_DIR / 'coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False):
""" Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
Usage: from utils.dataloaders import *; autosplit()
Arguments
path: Path to images directory
weights: Train, val, test weights (list, tuple)
annotated_only: Only use images with an annotated txt file
"""
path = Path(path) # images dir
files = sorted(x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS) # image files only
n = len(files) # number of files
random.seed(0) # for reproducibility
indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split
txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files
for x in txt:
if (path.parent / x).exists():
(path.parent / x).unlink() # remove existing
print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only)
for i, img in tqdm(zip(indices, files), total=n):
if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label
with open(path.parent / txt[i], 'a') as f:
f.write(f'./{img.relative_to(path.parent).as_posix()}' + '\n') # add image to txt file
def verify_image_label(args):
# Verify one image-label pair
im_file, lb_file, prefix = args
nm, nf, ne, nc, msg, segments = 0, 0, 0, 0, '', [] # number (missing, found, empty, corrupt), message, segments
try:
# verify images
im = Image.open(im_file)
im.verify() # PIL verify
shape = exif_size(im) # image size
assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}'
if im.format.lower() in ('jpg', 'jpeg'):
with open(im_file, 'rb') as f:
f.seek(-2, 2)
if f.read() != b'\xff\xd9': # corrupt JPEG
ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100)
msg = f'{prefix}WARNING ⚠️ {im_file}: corrupt JPEG restored and saved'
# verify labels
if os.path.isfile(lb_file):
nf = 1 # label found
with open(lb_file) as f:
lb = [x.split() for x in f.read().strip().splitlines() if len(x)]
if any(len(x) > 6 for x in lb): # is segment
classes = np.array([x[0] for x in lb], dtype=np.float32)
segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in lb] # (cls, xy1...)
lb = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh)
lb = np.array(lb, dtype=np.float32)
nl = len(lb)
if nl:
assert lb.shape[1] == 5, f'labels require 5 columns, {lb.shape[1]} columns detected'
assert (lb >= 0).all(), f'negative label values {lb[lb < 0]}'
assert (lb[:, 1:] <= 1).all(), f'non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}'
_, i = np.unique(lb, axis=0, return_index=True)
if len(i) < nl: # duplicate row check
lb = lb[i] # remove duplicates
if segments:
segments = [segments[x] for x in i]
msg = f'{prefix}WARNING ⚠️ {im_file}: {nl - len(i)} duplicate labels removed'
else:
ne = 1 # label empty
lb = np.zeros((0, 5), dtype=np.float32)
else:
nm = 1 # label missing
lb = np.zeros((0, 5), dtype=np.float32)
return im_file, lb, shape, segments, nm, nf, ne, nc, msg
except Exception as e:
nc = 1
msg = f'{prefix}WARNING ⚠️ {im_file}: ignoring corrupt image/label: {e}'
return [None, None, None, None, nm, nf, ne, nc, msg]
class HUBDatasetStats():
""" Class for generating HUB dataset JSON and `-hub` dataset directory
Arguments
path: Path to data.yaml or data.zip (with data.yaml inside data.zip)
autodownload: Attempt to download dataset if not found locally
Usage
from utils.dataloaders import HUBDatasetStats
stats = HUBDatasetStats('coco128.yaml', autodownload=True) # usage 1
stats = HUBDatasetStats('path/to/coco128.zip') # usage 2
stats.get_json(save=False)
stats.process_images()
"""
def __init__(self, path='coco128.yaml', autodownload=False):
# Initialize class
zipped, data_dir, yaml_path = self._unzip(Path(path))
try:
with open(check_yaml(yaml_path), errors='ignore') as f:
data = yaml.safe_load(f) # data dict
if zipped:
data['path'] = data_dir
except Exception as e:
raise Exception('error/HUB/dataset_stats/yaml_load') from e
| check_dataset(data, autodownload) # download dataset if missing | 8 | 2023-11-12 13:28:26+00:00 | 12k |
BSoD123456/ffta_us_cn | ffta_finder.py | [
{
"identifier": "c_ffta_sect_tab_ref",
"path": "ffta_sect.py",
"snippet": "class c_ffta_sect_tab_ref(c_ffta_sect_tab):\n \n @staticmethod\n def _TAB_REF_CLS():\n return c_ffta_sect\n \n @tabitm()\n def get_entry(self, ofs):\n return self.readval(ofs, self._TAB_WIDTH, Fals... | from ffta_sect import (
c_ffta_sect_tab_ref, c_ffta_sect_tab_ref_addr,
c_ffta_sect_text_line, c_ffta_sect_text_buf,
c_ffta_sect_text, c_ffta_sect_text_page,
c_ffta_sect_fixed_text, c_ffta_sect_words_text,
)
from hexdump import hexdump as hd
from pprint import pprint as ppr
from ffta_sect import main as sect_main
from ffta_sect import rom_cn, rom_jp, rom_us
from ffta_font import c_ffta_font_drawer
from ffta_charset import c_ffta_charset_us_dummy as c_charset
import pdb | 8,899 | def _scan(self, brk_out):
st = self.ST_SCAN_I
while self.win_ed + self.wd <= self.top_ofs:
#if self.win_ed % 0x10000 == 0:
# print('scan', hex(self.win_ed))
if st == self.ST_SCAN_I:
#print('in', hex(self.win_ed))
st = self._shift_in()
if st == self.ST_SCAN_O:
#print('out', hex(self.win_ed))
if brk_out:
break
st = self._shift_out()
elif st == self.ST_CHECK:
#print('chk', hex(self.win_ed))
st = self._chk_itm_bot()
elif st == self.ST_CHECK_DROP:
#print('chkdrp', hex(self.win_ed))
st = self._chk_itm_bot()
if st != self.ST_FOUND:
st = self.ST_DROPALL
elif st == self.ST_BYPASS:
#print('bp', hex(self.win_ed))
st = self.ST_SCAN_I
elif st == self.ST_DROPALL:
#print('drp', hex(self.win_ed))
if brk_out:
break
st = self._drop_all()
elif st == self.ST_FOUND:
yield self.win_st, self.win_ed, self.win_len, self.win_max
st = self._shift_out()
#yield False, self.win_st, self.win_ed, self.win_len, self.win_max
def scan(self):
yield from _scan(False)
def check(self, ofs = None):
if ofs is None:
ofs = self.win_ed
if ofs % self.wd:
return False, 0, 0
self.reset(ofs)
for st, ed, ln, mx in self._scan(True):
return True, ln, mx
return False, 0, 0
class c_text_checker:
def __init__(self, sect, thrs = (2, 3, 9-3, 7-3, 3, 3)):
self.sect = sect
self.rtf2 = c_ffta_ref_tab_finder(sect, 0, sect._sect_top, 2)
self.rtf4 = c_ffta_ref_tab_finder(sect, 0, sect._sect_top, 4)
self._thrs = thrs
def _chk_tab(self, ofs, cls):
try:
dst = self.sect.subsect(ofs, cls)
for i in dst.iter_item(refresh = True):
pass
except:
return False, None, None, None
sz = dst.sect_top
if sz is None:
assert(dst.tsize < 2)
return False, dst, dst.tsize, None
return True, dst, dst.tsize, sz
def _chk_item(self, ofs, cls):
try:
dst = self.sect.subsect(ofs, cls)
except:
return False, None, None, None
sz = dst.sect_top
if sz is None:
return False, dst, None, None
return True, dst, sz, sz
def check(self, ofs, typ):
cls = (
c_ffta_sect_text, c_ffta_sect_text_page,
c_ffta_sect_text_line, c_ffta_sect_text_buf)
for i, dtyp in enumerate((1, 2, 4, 8)):
if not (typ & dtyp):
continue
if dtyp & 0x1:
fnd, ln, mx = self.rtf4.check(ofs)
elif dtyp & 0x2:
fnd, ln, mx = self.rtf2.check(ofs)
else:
fnd = True
if not fnd:
continue
if dtyp & 0x3:
r = self._chk_tab(ofs, cls[i])
else:
r = self._chk_item(ofs, cls[i])
if r[0] and r[2] >= self._thrs[i]:
return r
return False, None, None, None
def _chk_atab(self, mn, mx, cls):
sz = mx - mn
ln = sz // 4
subrngs = []
try:
sct = self.sect.subsect(mn, cls, self.sect, ln)
for sub in sct:
if sub is None:
continue
if isinstance(sub, c_ffta_sect_tab_ref):
for i in sub.iter_item(refresh = True):
pass
subrngs.append((sub.real_offset, sub.sect_top))
except:
return False, None, None, None, None
return True, subrngs, sct, ln, sz
def check_atab(self, mn, mx, typ):
cls = (
| #! python3
# coding: utf-8
INF = float('inf')
c_symb = object
class c_range_holder:
def __init__(self):
self.rngs = []
def _find_ridx(self, val):
lst_ridx = -1
lst_mx = None
for i, rng in enumerate(self.rngs):
mn, mx = rng
if val < mn:
return False, lst_ridx, i, lst_mx == val, val == mn - 1
elif mn <= val < mx:
return True, i, i, True, True
else:
lst_ridx = i
lst_mx = mx
return False, lst_ridx, len(self.rngs), lst_mx == val, False
def _hold(self, rng, upd):
rngs = self.rngs
mn, mx = rng
rm_ridx_rng = [None, None]
add_rng = [None, None]
adj_cnt = 0
cv1, prv_ri, nxt_ri, rm_prv, rm_nxt = self._find_ridx(mn)
if rm_prv:
rm_ridx_rng[0] = prv_ri
add_rng[0] = rngs[prv_ri][0]
if not cv1:
adj_cnt += 1
else:
rm_ridx_rng[0] = nxt_ri
add_rng[0] = mn
cv2, prv_ri, nxt_ri, rm_prv, rm_nxt = self._find_ridx(mx-1)
if rm_nxt:
rm_ridx_rng[1] = nxt_ri
add_rng[1] = rngs[nxt_ri][1]
if not cv2:
# adj ridx can not be the same
# so just use 1 counter
adj_cnt += 1
else:
rm_ridx_rng[1] = prv_ri
add_rng[1] = mx
rr_cmn, rr_cmx = rm_ridx_rng
add_rng = tuple(add_rng)
if rr_cmn == rr_cmx and cv1 and cv2:
assert(rngs[rr_cmn] == add_rng)
return True, True # cover, include
elif rr_cmn > rr_cmx:
assert(rr_cmn >= 0 and rr_cmn - rr_cmx == 1 and add_rng[0] == mn and add_rng[1] == mx)
if upd:
rngs.insert(rr_cmn, add_rng)
return False, False # cover, include
else:
if rr_cmn < 0:
rr_cmn = 0
inner_cnt = rr_cmx - rr_cmn + 1 - adj_cnt
assert(inner_cnt >= 0)
if upd:
nrngs = rngs[:rr_cmn]
nrngs.append(add_rng)
nrngs.extend(rngs[rr_cmx+1:])
self.rngs = nrngs
return inner_cnt > 0, False # cover, include
def hold(self, rng):
return self._hold(rng, True)
def peek(self, rng):
return self._hold(rng, False)
def peek1(self, ofs):
return self.peek((ofs, ofs+1))
def iter_rngs(self, arng = None):
if not arng:
arng = (0, None)
st, ed = arng
def chk_in_arng(mn, mx):
if (ed and mn >= ed) or mx <= st:
return None
rmn, rmx = max(st, mn), min(ed, mx) if ed else mx
if rmn >= rmx:
return None
return rmn, rmx
lst_mx = 0
for mn, mx in self.rngs:
drng = chk_in_arng(lst_mx, mn)
lst_mx = mx
if drng:
yield drng, False
drng = chk_in_arng(mn, mx)
if drng:
yield drng, True
if ed and ed > lst_mx:
drng = chk_in_arng(lst_mx, ed)
if drng:
yield drng, False
class c_ffta_ref_addr_finder:
def __init__(self, sect, st_ofs, top_ofs, itm_align = 1):
self.sect = sect
self.top_ofs = top_ofs
self.itm_align = itm_align
self.st_ofs = st_ofs
def scan(self):
cur_ofs = self.st_ofs
sect = self.sect
while cur_ofs + 4 <= self.top_ofs:
adr = sect.U32(cur_ofs)
ofs = sect._addr2offs(adr)
if 0 <= ofs < self.top_ofs:
yield ofs, adr, cur_ofs
cur_ofs += 4
class c_ffta_ref_addr_hold_finder(c_ffta_ref_addr_finder):
def __init__(self, *args, addr_holder = None, ignore_item = False, merge_cn = False, **kargs):
super().__init__(*args, **kargs)
if not addr_holder:
addr_holder = c_range_holder()
self.holder = addr_holder
self.ignore_item = ignore_item
self.merge_cn = merge_cn
self._pre_scan()
def _is_ptr(self, ent):
adr = self.sect.U32(ent)
ofs = self.sect._addr2offs(adr)
return 0 < ofs < self.top_ofs, ofs, adr == 0
def _pre_scan(self, adrtab_min = 5-1):
adrtab_min_sz = adrtab_min * 4
cur_ofs = self.st_ofs
rvs_tab = {}
ptr_tab = {}
itm_tab = set()
while cur_ofs + 4 <= self.top_ofs:
cur_ent = cur_ofs
while not (cur_ent in ptr_tab or cur_ent in itm_tab):
is_ptr, nxt_ent, is_null = self._is_ptr(cur_ent)
if is_ptr:
#self.holder.hold((cur_ent, cur_ent + 4)) # too slow
ptr_tab[cur_ent] = nxt_ent
if not nxt_ent in rvs_tab:
rvs_tab[nxt_ent] = []
rvs_tab[nxt_ent].append(cur_ent)
else:
if is_null:
ptr_tab[cur_ent] = None
if cur_ent != cur_ofs:
itm_tab.add(cur_ent)
break
cur_ent = nxt_ent
cur_ofs += 4
adr_tab = []
ptr_sort = sorted(k for k in ptr_tab)
lst_mn = None
lst_ofs = 0
# insert another last ptr to handle the real last one
_af = (1 << 32) - 1
ptr_sort.append(_af)
for ofs in ptr_sort:
if not ofs < _af:
continue
ofs_p = ptr_tab[ofs]
if not ofs_p is None and not ofs_p in itm_tab and not self.ignore_item:
continue
is_rng = False
if ofs == lst_ofs + 4:
if lst_mn is None:
lst_mn = lst_ofs
elif not lst_mn is None:
mn = lst_mn
mx = lst_ofs + 4
lst_mn = None
is_rng = True
lst_ofs = ofs
if not is_rng:
continue
if mx - mn < adrtab_min_sz:
continue
lst_dofs = None
for dofs in range(mn, mx, 4):
if not dofs in rvs_tab:
continue
if not lst_dofs is None and dofs - lst_dofs >= adrtab_min_sz:
adr_tab.append((lst_dofs, dofs))
lst_dofs = dofs
if self.merge_cn:
break
if not lst_dofs is None and mx - lst_dofs >= adrtab_min_sz:
adr_tab.append((lst_dofs, mx))
self.ptr_tab = ptr_tab
self.rvs_tab = rvs_tab
self.itm_tab = itm_tab
self.adr_tab = adr_tab
def scan_adrtab(self, adrtab_min = 5):
self._last_hold = None
rmati = []
for ati, (mn, mx) in enumerate(self.adr_tab):
yield mn, mx
if mn == self._last_hold:
rmati.append(ati)
for ati in reversed(rmati):
self.adr_tab.pop(ati)
def scan(self):
self._last_hold = None
for ofs in sorted(self.itm_tab):
cv, incld = self.holder.peek1(ofs)
if cv:
continue
yield ofs
if ofs == self._last_hold:
self.itm_tab.remove(ofs)
def hold(self, ofs, top):
if top is None:
top = 1
self.holder.hold((ofs, ofs + top))
self._last_hold = ofs
class c_ffta_ref_tab_finder:
ST_DROPALL = c_symb()
ST_BYPASS = c_symb()
ST_FOUND = c_symb()
ST_SCAN_I = c_symb()
ST_SCAN_O = c_symb()
ST_CHECK = c_symb()
ST_CHECK_DROP = c_symb()
def __init__(self, sect, st_ofs, top_ofs, ent_width, itm_align = 1):
self.sect = sect
self.top_ofs = top_ofs
self.wd = ent_width
if itm_align is None:
itm_align = ent_width
self.itm_align = itm_align
self.ENT_A0 = 0
self.ENT_AF = (1 << self.wd * 8) - 1
st_ofs = (st_ofs // self.wd) * self.wd
self.win = []
self.win_st = st_ofs
self.win_ed = st_ofs
self.win_min = INF
self.win_max = 0
def reset(self, st_ofs):
st_ofs = (st_ofs // self.wd) * self.wd
self.win_ed = st_ofs
self._drop_all()
@property
def win_len(self):
l = self.win_ed - self.win_st
assert(l == len(self.win) * self.wd)
return l // self.wd
def _ent2ofs(self, ent):
return self.win_st + ent
def _ofs2ent(self, ofs):
assert(ofs >= self.win_st)
return ofs - self.win_st
def _hndl_a0(self):
return True
def _hndl_af(self):
return False
def _shift_in(self):
ent = self.sect.readval(self.win_ed, self.wd, False)
self.win_ed += self.wd
self.win.append(ent)
if ent == self.ENT_A0:
bypass = self._hndl_a0()
elif ent == self.ENT_AF:
bypass = self._hndl_af()
else:
bypass = None
if bypass is True:
return self.ST_BYPASS
elif bypass is False:
return self.ST_CHECK_DROP
else:
pass
if self._ent2ofs(ent) % self.itm_align:
return self.ST_DROPALL
if ent > self.win_max:
self.win_max = ent
if ent < self.win_min:
self.win_min = ent
return self.ST_CHECK
def _shift_out(self):
self.win_st += self.wd
if self.win_st == self.win_ed:
return self.ST_DROPALL
ent = self.win.pop(0)
a0 = self.ENT_A0
af = self.ENT_AF
if ent == a0 or ent == af:
return self.ST_CHECK
upd_min = (ent == self.win_min)
upd_max = (ent == self.win_max)
if not (upd_min or upd_max):
return self.ST_CHECK
wmin = INF
wmax = 0
for ent in self.win:
if ent == a0:
continue
elif ent == af:
continue
if upd_min and ent < wmin:
wmin = ent
if upd_max and ent > wmax:
wmax = ent
if upd_min:
self.win_min = wmin
if upd_max:
self.win_max = wmax
return self.ST_CHECK
def _chk_itm_bot(self):
ed = self.win_ed
wmin = self._ent2ofs(self.win_min)
wmax = self._ent2ofs(self.win_max)
if ed == wmin:
return self.ST_FOUND
elif ed > wmin or wmax >= self.top_ofs:
return self.ST_SCAN_O
return self.ST_SCAN_I
def _drop_all(self):
self.win.clear()
self.win_st = self.win_ed
self.win_min = INF
self.win_max = 0
return self.ST_SCAN_I
def _scan(self, brk_out):
st = self.ST_SCAN_I
while self.win_ed + self.wd <= self.top_ofs:
#if self.win_ed % 0x10000 == 0:
# print('scan', hex(self.win_ed))
if st == self.ST_SCAN_I:
#print('in', hex(self.win_ed))
st = self._shift_in()
if st == self.ST_SCAN_O:
#print('out', hex(self.win_ed))
if brk_out:
break
st = self._shift_out()
elif st == self.ST_CHECK:
#print('chk', hex(self.win_ed))
st = self._chk_itm_bot()
elif st == self.ST_CHECK_DROP:
#print('chkdrp', hex(self.win_ed))
st = self._chk_itm_bot()
if st != self.ST_FOUND:
st = self.ST_DROPALL
elif st == self.ST_BYPASS:
#print('bp', hex(self.win_ed))
st = self.ST_SCAN_I
elif st == self.ST_DROPALL:
#print('drp', hex(self.win_ed))
if brk_out:
break
st = self._drop_all()
elif st == self.ST_FOUND:
yield self.win_st, self.win_ed, self.win_len, self.win_max
st = self._shift_out()
#yield False, self.win_st, self.win_ed, self.win_len, self.win_max
def scan(self):
yield from _scan(False)
def check(self, ofs = None):
if ofs is None:
ofs = self.win_ed
if ofs % self.wd:
return False, 0, 0
self.reset(ofs)
for st, ed, ln, mx in self._scan(True):
return True, ln, mx
return False, 0, 0
class c_text_checker:
def __init__(self, sect, thrs = (2, 3, 9-3, 7-3, 3, 3)):
self.sect = sect
self.rtf2 = c_ffta_ref_tab_finder(sect, 0, sect._sect_top, 2)
self.rtf4 = c_ffta_ref_tab_finder(sect, 0, sect._sect_top, 4)
self._thrs = thrs
def _chk_tab(self, ofs, cls):
try:
dst = self.sect.subsect(ofs, cls)
for i in dst.iter_item(refresh = True):
pass
except:
return False, None, None, None
sz = dst.sect_top
if sz is None:
assert(dst.tsize < 2)
return False, dst, dst.tsize, None
return True, dst, dst.tsize, sz
def _chk_item(self, ofs, cls):
try:
dst = self.sect.subsect(ofs, cls)
except:
return False, None, None, None
sz = dst.sect_top
if sz is None:
return False, dst, None, None
return True, dst, sz, sz
def check(self, ofs, typ):
cls = (
c_ffta_sect_text, c_ffta_sect_text_page,
c_ffta_sect_text_line, c_ffta_sect_text_buf)
for i, dtyp in enumerate((1, 2, 4, 8)):
if not (typ & dtyp):
continue
if dtyp & 0x1:
fnd, ln, mx = self.rtf4.check(ofs)
elif dtyp & 0x2:
fnd, ln, mx = self.rtf2.check(ofs)
else:
fnd = True
if not fnd:
continue
if dtyp & 0x3:
r = self._chk_tab(ofs, cls[i])
else:
r = self._chk_item(ofs, cls[i])
if r[0] and r[2] >= self._thrs[i]:
return r
return False, None, None, None
def _chk_atab(self, mn, mx, cls):
sz = mx - mn
ln = sz // 4
subrngs = []
try:
sct = self.sect.subsect(mn, cls, self.sect, ln)
for sub in sct:
if sub is None:
continue
if isinstance(sub, c_ffta_sect_tab_ref):
for i in sub.iter_item(refresh = True):
pass
subrngs.append((sub.real_offset, sub.sect_top))
except:
return False, None, None, None, None
return True, subrngs, sct, ln, sz
def check_atab(self, mn, mx, typ):
cls = ( | c_ffta_sect_fixed_text, c_ffta_sect_words_text) | 6 | 2023-11-12 18:43:53+00:00 | 12k |
civrealm/civrealm | src/civrealm/envs/freeciv_wrapper/tensor_wrapper.py | [
{
"identifier": "TensorAction",
"path": "src/civrealm/envs/freeciv_wrapper/action_wrapper.py",
"snippet": "class TensorAction(Wrapper):\n \"\"\"\n A wrapper that defines tensor action spaces, transforms tensor actions into\n actions that could be handeled by FreecivBaseEnv instance, and adds m... | import numpy as np
from civrealm.envs import FreecivBaseEnv
from civrealm.envs.freeciv_wrapper.config import default_tensor_config
from .action_wrapper import TensorAction
from .core import Wrapper
from .observation_wrapper import CacheLastObs, TensorObservation
from .tensor_base_wrapper import TensorBase | 7,333 |
class TensorWrapper(Wrapper):
"""
TensorWrapper is used to make Civrealm environment tensorized by converting
observations from FreecivBaseEnv into tensors and tensor actions back to actions compatible with
FreecivBaseEnv.
TensorWrapper is composed `TensorBase`, `TensorAction`, `TensorObservation`
and `CacheLastObs`.
Parameters
----------
env
config:
tensor env configuration
Attributes
----------
config: dict
tensor wrapper configuration
"""
def __init__(self, env: FreecivBaseEnv, config: dict = default_tensor_config):
self.config = config
super().__init__(
CacheLastObs(
|
class TensorWrapper(Wrapper):
"""
TensorWrapper is used to make Civrealm environment tensorized by converting
observations from FreecivBaseEnv into tensors and tensor actions back to actions compatible with
FreecivBaseEnv.
TensorWrapper is composed `TensorBase`, `TensorAction`, `TensorObservation`
and `CacheLastObs`.
Parameters
----------
env
config:
tensor env configuration
Attributes
----------
config: dict
tensor wrapper configuration
"""
def __init__(self, env: FreecivBaseEnv, config: dict = default_tensor_config):
self.config = config
super().__init__(
CacheLastObs( | TensorObservation(TensorAction(TensorBase(env, config=config))) | 0 | 2023-11-18 19:35:50+00:00 | 12k |
RAIVNLab/MatFormer-OLMo | olmo/model.py | [
{
"identifier": "PathOrStr",
"path": "olmo/aliases.py",
"snippet": ""
},
{
"identifier": "BeamSearch",
"path": "olmo/beam_search.py",
"snippet": "class BeamSearch:\n \"\"\"\n Implements the beam search algorithm for decoding the most likely sequences.\n\n :param end_index: The i... | import math
import os
import torch
import torch.backends.cuda
import torch.nn as nn
import torch.nn.functional as F
import warnings
from abc import abstractmethod
from typing import Dict, List, NamedTuple, Optional, Sequence, Tuple, cast
from torch import einsum
from .aliases import PathOrStr
from .beam_search import BeamSearch, Constraint, FinalSequenceScorer, Sampler
from .config import ActivationType, BlockType, LayerNormType, ModelConfig
from .exceptions import OlmoConfigurationError
from functools import partial
from cached_path import cached_path | 8,310 | """
Adapted from
[MosaiclML](https://github.com/mosaicml/examples.git) and
[minGPT](https://github.com/karpathy/minGPT.git)
"""
from __future__ import annotations
__all__ = [
"LayerNormBase",
"LayerNorm",
"RMSLayerNorm",
"RotaryEmbedding",
"Activation",
"GELU",
"ReLU",
"SwiGLU",
"OlmoBlock",
"OlmoSequentialBlock",
"OlmoParallelBlock",
"Olmo",
"OlmoOutput",
"OlmoGenerateOutput",
]
class MatformerManager:
_instance = None
def __init__(self):
raise RuntimeError("Call get_instance() instead")
def initialize(self):
self.current_factor = 1
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = cls.__new__(cls)
cls._instance.initialize()
return cls._instance
class LayerNormBase(nn.Module):
| """
Adapted from
[MosaiclML](https://github.com/mosaicml/examples.git) and
[minGPT](https://github.com/karpathy/minGPT.git)
"""
from __future__ import annotations
__all__ = [
"LayerNormBase",
"LayerNorm",
"RMSLayerNorm",
"RotaryEmbedding",
"Activation",
"GELU",
"ReLU",
"SwiGLU",
"OlmoBlock",
"OlmoSequentialBlock",
"OlmoParallelBlock",
"Olmo",
"OlmoOutput",
"OlmoGenerateOutput",
]
class MatformerManager:
_instance = None
def __init__(self):
raise RuntimeError("Call get_instance() instead")
def initialize(self):
self.current_factor = 1
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = cls.__new__(cls)
cls._instance.initialize()
return cls._instance
class LayerNormBase(nn.Module): | def __init__(self, config: ModelConfig): | 8 | 2023-11-14 02:24:07+00:00 | 12k |
1in-oos/ccplus | caringcaribou/tests/test_module_uds.py | [
{
"identifier": "Constants",
"path": "caringcaribou/utils/iso14229_1.py",
"snippet": "class Constants(object):\n # NR_SI (Negative Response Service Identifier) is a bit special, since\n # it is not a service per se.\n # From ISO-14229-1 specification: \"The NR_SI value is co-ordinated with\n ... | from caringcaribou.utils.iso14229_1 import Constants, Iso14229_1, NegativeResponseCodes, ServiceID, Services
from caringcaribou.tests.mock.mock_ecu_uds import MockEcuIso14229
from caringcaribou.modules import uds
import unittest | 9,807 | from __future__ import print_function
class UdsModuleTestCase(unittest.TestCase):
ARB_ID_REQUEST = 0x300E
ARB_ID_RESPONSE = 0x300F
# Timeout (in seconds) when waiting for response during bruteforce
BRUTEFORCE_TIMEOUT = 0.01
def setUp(self):
# Initialize mock ECU
| from __future__ import print_function
class UdsModuleTestCase(unittest.TestCase):
ARB_ID_REQUEST = 0x300E
ARB_ID_RESPONSE = 0x300F
# Timeout (in seconds) when waiting for response during bruteforce
BRUTEFORCE_TIMEOUT = 0.01
def setUp(self):
# Initialize mock ECU | self.ecu = MockEcuIso14229(self.ARB_ID_REQUEST, self.ARB_ID_RESPONSE) | 5 | 2023-11-13 05:05:46+00:00 | 12k |
L1bra1/WeakMotion | gen_data/step2_waymo_generate_weak.py | [
{
"identifier": "Box",
"path": "gen_data/nuscenes/utils/data_classes.py",
"snippet": "class Box:\n \"\"\" Simple data class representing a 3d box including, label, score and velocity. \"\"\"\n\n def __init__(self,\n center: List[float],\n size: List[float],\n ... | import numpy as np
import os
import os.path as osp
import copy
import tqdm
import pickle
import argparse
from pathlib import Path
from functools import reduce
from gen_data.nuscenes.utils.data_classes import Box
from pyquaternion import Quaternion
from gen_data.waymo_data_utils import process_past_pc_waymo, build_BEV_input_waymo, build_BEV_gt_waymo, convert_to_sparse_bev_waymo
from gen_data.gen_weak_waymo_utils import gen_weak_supervision
| 9,016 | """
Prepare the input data, motion ground truth, and Foreground/Background information for Waymo data.
"""
obj_class_map = {
"Vehicle": 1, "Pedestrian":2, "Cyclist": 3, "Others": 4
} # take sign as others
voxel_size = (0.25, 0.25, 0.4)
area_extents = np.array([[-32., 32.], [-32., 32.], [-1., 4.]])
def check_folder(folder_name):
if not os.path.exists(folder_name):
os.mkdir(folder_name)
return folder_name
def create_waymo_infos(root_path, save_root_path, mode):
flow_time_gap = 1
past_time_gap = 1
check_folder(os.path.join(save_root_path, 'input-data'))
check_folder(os.path.join(save_root_path, 'input-data', mode))
if mode == "train":
check_folder(os.path.join(save_root_path, 'weak-data'))
check_folder(os.path.join(save_root_path, 'weak-data', mode))
sample_dir = check_folder(os.path.join(save_root_path, 'weak-data', 'train_sample_info'))
if mode == "train":
scene_list_file = "ImageSets/train.txt"
sample_time_gap = 1.0
type = 'training'
elif mode == "val":
scene_list_file = "ImageSets/val.txt"
sample_time_gap = 1.0
type = 'validation'
else:
assert Exception
with open(scene_list_file, 'r') as f:
scene_list = f.readlines()
scene_list = [s.strip().split(".")[0] for s in scene_list]
print("finish loading scene list")
sample_data_step = int(sample_time_gap * 10)
flow_data_step = int(flow_time_gap * 10)
past_data_step = int(past_time_gap * 10)
past_data_sample_index = np.arange(0, past_data_step, 2)
future_data_sample_index = np.arange(1, 1 + flow_data_step, 1)
for scene_name in tqdm.tqdm(scene_list):
lidar_path = root_path / type / scene_name
assert osp.exists(lidar_path)
ann_path = lidar_path / f"{scene_name}.pkl"
pc_random_index_dict = dict()
pc_down_sample_dict = dict()
with open(ann_path, 'rb') as f:
ann_data = pickle.load(f)
num_lidar = len(ann_data)
for i in range(0, num_lidar, sample_data_step):
# remove unenough prev and future sweep
if i < past_data_step or i > (num_lidar - 1 - flow_data_step):
continue
''' get current info'''
ann_i = ann_data[i]
# extract info about reference key
pose = ann_i["pose"] # global_from_car, convert pc in car system to global system
ts = ann_i["time_stamp"]
token = "{}_{:04d}".format(scene_name, i)
''' get past pc '''
past_pc_list = process_past_pc_waymo(scene_name, lidar_path, ann_data, i, past_data_sample_index, pose, ts)
''' build BEV input & gt '''
| """
Prepare the input data, motion ground truth, and Foreground/Background information for Waymo data.
"""
obj_class_map = {
"Vehicle": 1, "Pedestrian":2, "Cyclist": 3, "Others": 4
} # take sign as others
voxel_size = (0.25, 0.25, 0.4)
area_extents = np.array([[-32., 32.], [-32., 32.], [-1., 4.]])
def check_folder(folder_name):
if not os.path.exists(folder_name):
os.mkdir(folder_name)
return folder_name
def create_waymo_infos(root_path, save_root_path, mode):
flow_time_gap = 1
past_time_gap = 1
check_folder(os.path.join(save_root_path, 'input-data'))
check_folder(os.path.join(save_root_path, 'input-data', mode))
if mode == "train":
check_folder(os.path.join(save_root_path, 'weak-data'))
check_folder(os.path.join(save_root_path, 'weak-data', mode))
sample_dir = check_folder(os.path.join(save_root_path, 'weak-data', 'train_sample_info'))
if mode == "train":
scene_list_file = "ImageSets/train.txt"
sample_time_gap = 1.0
type = 'training'
elif mode == "val":
scene_list_file = "ImageSets/val.txt"
sample_time_gap = 1.0
type = 'validation'
else:
assert Exception
with open(scene_list_file, 'r') as f:
scene_list = f.readlines()
scene_list = [s.strip().split(".")[0] for s in scene_list]
print("finish loading scene list")
sample_data_step = int(sample_time_gap * 10)
flow_data_step = int(flow_time_gap * 10)
past_data_step = int(past_time_gap * 10)
past_data_sample_index = np.arange(0, past_data_step, 2)
future_data_sample_index = np.arange(1, 1 + flow_data_step, 1)
for scene_name in tqdm.tqdm(scene_list):
lidar_path = root_path / type / scene_name
assert osp.exists(lidar_path)
ann_path = lidar_path / f"{scene_name}.pkl"
pc_random_index_dict = dict()
pc_down_sample_dict = dict()
with open(ann_path, 'rb') as f:
ann_data = pickle.load(f)
num_lidar = len(ann_data)
for i in range(0, num_lidar, sample_data_step):
# remove unenough prev and future sweep
if i < past_data_step or i > (num_lidar - 1 - flow_data_step):
continue
''' get current info'''
ann_i = ann_data[i]
# extract info about reference key
pose = ann_i["pose"] # global_from_car, convert pc in car system to global system
ts = ann_i["time_stamp"]
token = "{}_{:04d}".format(scene_name, i)
''' get past pc '''
past_pc_list = process_past_pc_waymo(scene_name, lidar_path, ann_data, i, past_data_sample_index, pose, ts)
''' build BEV input & gt '''
| padded_voxel_points, voxel_indices_list = build_BEV_input_waymo(past_pc_list, past_data_sample_index, voxel_size, area_extents)
| 2 | 2023-11-12 07:03:29+00:00 | 12k |
c3exchange/c3-smartcontracts-v1 | contracts_unified/core/methods/settle.py | [
{
"identifier": "ARG_INDEX_ACCOUNT",
"path": "contracts_unified/core/c3call.py",
"snippet": "ARG_INDEX_ACCOUNT = Int(1)"
},
{
"identifier": "ARG_INDEX_OP",
"path": "contracts_unified/core/c3call.py",
"snippet": "ARG_INDEX_OP = Int(2)"
},
{
"identifier": "ARG_INDEX_SELECTOR",
... | from typing import cast
from pyteal import (
ABIReturnSubroutine,
And,
Assert,
BytesGe,
BytesMul,
Expr,
Global,
If,
Int,
Itob,
MethodSignature,
Not,
OnComplete,
Or,
Seq,
abi,
)
from contracts_unified.core.c3call import (
ARG_INDEX_ACCOUNT,
ARG_INDEX_OP,
ARG_INDEX_SELECTOR,
)
from contracts_unified.core.internal.health_check import health_check
from contracts_unified.core.internal.move import collect_fees, signed_add_to_cash
from contracts_unified.core.internal.perform_pool_move import perform_pool_move
from contracts_unified.core.internal.setup import setup
from contracts_unified.core.internal.validate_sender import sender_is_sig_validator
from contracts_unified.core.state_handler.order_handler import OrderStateHandler
from contracts_unified.library.c3types import (
AccountAddress,
Amount,
Boolean,
ExcessMargin,
InstrumentId,
OnChainOrderData,
OrderId,
SignedAmount,
)
from contracts_unified.library.c3types_server import SettleExtraData
from contracts_unified.library.c3types_user import (
DelegationChain,
OperationId,
OperationMetaData,
OrderData,
)
from contracts_unified.library.signed_math import signed_gte, signed_ltz, signed_neg | 7,760 | user_op: OperationMetaData,
_delegation_chain: DelegationChain,
server_args: SettleExtraData,
opup_budget: Amount,
) -> Expr:
"""Settles two orders
Arguments:
add_order_txn (ApplicationCallTransaction): The previous add_order transaction in this group that added the sell order to the order book.
buy_account (AccountAddress): The buyer user's account address.
user_op (OperationMetaData): Operation metadata containing buyer order data.
_delegation_chain (DelegationChain): Delegation chain. Unused.
server_args (SettleExtraData): Extra data for the settle operation.
opup_budget (Amount): Additional computation budget to allocate to this transaction.
"""
abi_false = abi.Bool()
add_order_op = OperationMetaData()
add_order_data = abi.make(abi.DynamicBytes)
buy_order = OrderData()
sell_order = OrderData()
sell_account = AccountAddress()
buy_order_id = abi.make(OrderId)
sell_order_id = abi.make(OrderId)
buy_order_onchain = OnChainOrderData()
sell_order_onchain = OnChainOrderData()
# Amounts for each order's buy/sell side
buyer_sell_amount = Amount()
buyer_buy_amount = Amount()
seller_sell_amount = Amount()
seller_buy_amount = Amount()
# Remaining amounts for each order's buy/sell side
buyer_sell_remaining = Amount()
buyer_borrow_remaining = Amount()
buyer_repay_remaining = Amount()
seller_sell_remaining = Amount()
seller_borrow_remaining = Amount()
seller_repay_remaining = Amount()
# New remaining amounts for each order's buy/sell side
buyer_new_sell_remaining = Amount()
buyer_new_borrow_remaining = Amount()
buyer_new_repay_remaining = Amount()
seller_new_sell_remaining = Amount()
seller_new_borrow_remaining = Amount()
seller_new_repay_remaining = Amount()
buyer_new_order_onchain = OnChainOrderData()
seller_new_order_onchain = OnChainOrderData()
buyer_buy_instrument = InstrumentId()
buyer_sell_instrument = InstrumentId()
seller_buy_instrument = InstrumentId()
seller_sell_instrument = InstrumentId()
buyer_to_send = Amount()
seller_to_send = Amount()
buyer_to_borrow = Amount()
seller_to_borrow = Amount()
buyer_to_repay = Amount()
seller_to_repay = Amount()
buyer_buy_delta = Amount()
seller_buy_delta = Amount()
buyer_sell_delta = Amount()
seller_sell_delta = Amount()
neg_borrow = SignedAmount()
buyer_fees = Amount()
seller_fees = Amount()
buyer_old_health = ExcessMargin()
buyer_health = ExcessMargin()
seller_old_health = ExcessMargin()
seller_health = ExcessMargin()
buyer_negative_margin = Boolean()
seller_negative_margin = Boolean()
return Seq(
setup(opup_budget.get()),
# Set constants
abi_false.set(Int(0)),
# Validate sender is a user proxy
cast(Expr, sender_is_sig_validator()),
# Extract the buy order
user_op.operation.use(lambda op_data:
Seq(
buy_order.decode(op_data.get()),
buy_order.operation.use(lambda op: Assert(op.get() == OperationId.Settle)),
buy_order.account.use(lambda acc: Assert(acc.get() == buy_account.get())),
)
),
# Add the order to the order book
cast(Expr, OrderStateHandler.add_order(buy_order)),
# Validate the sell order
Assert(add_order_txn.get().application_id() == Global.current_application_id()),
Assert(add_order_txn.get().on_completion() == OnComplete.NoOp),
Assert(add_order_txn.get().application_args.length() == ADD_ORDER_ARG_COUNT),
Assert(add_order_txn.get().application_args[ARG_INDEX_SELECTOR] == ADD_ORDER_SIG),
# Get the sell order
sell_account.decode(add_order_txn.get().application_args[ARG_INDEX_ACCOUNT]),
| """
Implements Core contract method for settling a pair of orders.
"""
ADD_ORDER_SIG = MethodSignature("add_order(address,((address,byte[32],uint64),byte[],byte[],uint8,byte[],address,byte[]),((address,byte[32],uint64),byte[],byte[],uint8,byte[],address,byte[])[],uint64)void")
ADD_ORDER_ARG_COUNT = Int(5)
MAX_FEES_DIVISOR = Int(40)
@ABIReturnSubroutine
def add_order(
# NOTE: Any update on this function must update ADD_ORDER_SIG and ADD_ORDER_ARG_COUNT above
account: AccountAddress,
user_op: OperationMetaData,
_delegation_chain: DelegationChain,
opup_budget: Amount,
) -> Expr:
"""Adds an order to the order book
Arguments:
account (AccountAddress): User's account address.
user_op (OperationMetaData): Operation metadata containing order data.
_delegation_chain (DelegationChain): Delegation chain. Unused.
opup_budget (Amount): Additional computation budget to allocate to this transaction.
"""
order = OrderData()
return Seq(
setup(opup_budget.get()),
# Validate signature validator' call
cast(Expr, sender_is_sig_validator()),
# Get order from user_op.data
user_op.operation.use(lambda op_data:
Seq(
order.decode(op_data.get()),
order.operation.use(lambda op: Assert(op.get() == OperationId.Settle)),
order.account.use(lambda acc: Assert(acc.get() == account.get()))
)
),
# Add order to the order book
cast(Expr, OrderStateHandler.add_order(order))
)
@ABIReturnSubroutine
def settle(
add_order_txn: abi.ApplicationCallTransaction,
buy_account: AccountAddress,
user_op: OperationMetaData,
_delegation_chain: DelegationChain,
server_args: SettleExtraData,
opup_budget: Amount,
) -> Expr:
"""Settles two orders
Arguments:
add_order_txn (ApplicationCallTransaction): The previous add_order transaction in this group that added the sell order to the order book.
buy_account (AccountAddress): The buyer user's account address.
user_op (OperationMetaData): Operation metadata containing buyer order data.
_delegation_chain (DelegationChain): Delegation chain. Unused.
server_args (SettleExtraData): Extra data for the settle operation.
opup_budget (Amount): Additional computation budget to allocate to this transaction.
"""
abi_false = abi.Bool()
add_order_op = OperationMetaData()
add_order_data = abi.make(abi.DynamicBytes)
buy_order = OrderData()
sell_order = OrderData()
sell_account = AccountAddress()
buy_order_id = abi.make(OrderId)
sell_order_id = abi.make(OrderId)
buy_order_onchain = OnChainOrderData()
sell_order_onchain = OnChainOrderData()
# Amounts for each order's buy/sell side
buyer_sell_amount = Amount()
buyer_buy_amount = Amount()
seller_sell_amount = Amount()
seller_buy_amount = Amount()
# Remaining amounts for each order's buy/sell side
buyer_sell_remaining = Amount()
buyer_borrow_remaining = Amount()
buyer_repay_remaining = Amount()
seller_sell_remaining = Amount()
seller_borrow_remaining = Amount()
seller_repay_remaining = Amount()
# New remaining amounts for each order's buy/sell side
buyer_new_sell_remaining = Amount()
buyer_new_borrow_remaining = Amount()
buyer_new_repay_remaining = Amount()
seller_new_sell_remaining = Amount()
seller_new_borrow_remaining = Amount()
seller_new_repay_remaining = Amount()
buyer_new_order_onchain = OnChainOrderData()
seller_new_order_onchain = OnChainOrderData()
buyer_buy_instrument = InstrumentId()
buyer_sell_instrument = InstrumentId()
seller_buy_instrument = InstrumentId()
seller_sell_instrument = InstrumentId()
buyer_to_send = Amount()
seller_to_send = Amount()
buyer_to_borrow = Amount()
seller_to_borrow = Amount()
buyer_to_repay = Amount()
seller_to_repay = Amount()
buyer_buy_delta = Amount()
seller_buy_delta = Amount()
buyer_sell_delta = Amount()
seller_sell_delta = Amount()
neg_borrow = SignedAmount()
buyer_fees = Amount()
seller_fees = Amount()
buyer_old_health = ExcessMargin()
buyer_health = ExcessMargin()
seller_old_health = ExcessMargin()
seller_health = ExcessMargin()
buyer_negative_margin = Boolean()
seller_negative_margin = Boolean()
return Seq(
setup(opup_budget.get()),
# Set constants
abi_false.set(Int(0)),
# Validate sender is a user proxy
cast(Expr, sender_is_sig_validator()),
# Extract the buy order
user_op.operation.use(lambda op_data:
Seq(
buy_order.decode(op_data.get()),
buy_order.operation.use(lambda op: Assert(op.get() == OperationId.Settle)),
buy_order.account.use(lambda acc: Assert(acc.get() == buy_account.get())),
)
),
# Add the order to the order book
cast(Expr, OrderStateHandler.add_order(buy_order)),
# Validate the sell order
Assert(add_order_txn.get().application_id() == Global.current_application_id()),
Assert(add_order_txn.get().on_completion() == OnComplete.NoOp),
Assert(add_order_txn.get().application_args.length() == ADD_ORDER_ARG_COUNT),
Assert(add_order_txn.get().application_args[ARG_INDEX_SELECTOR] == ADD_ORDER_SIG),
# Get the sell order
sell_account.decode(add_order_txn.get().application_args[ARG_INDEX_ACCOUNT]), | add_order_op.decode(add_order_txn.get().application_args[ARG_INDEX_OP]), | 1 | 2023-11-17 20:54:15+00:00 | 12k |
gunderson-dettmer/CE2OCF | CE2OCF/datamap/parsers.py | [
{
"identifier": "__version__",
"path": "CE2OCF/__about__.py",
"snippet": ""
},
{
"identifier": "traverse_datamap",
"path": "CE2OCF/datamap/crawler.py",
"snippet": "def traverse_datamap(\n datamap: dict[str, Any] | BaseModel | str | list | FieldPostProcessorModel,\n field_name: str ... | import datetime
from pathlib import Path
from typing import Callable, Literal, Optional
from CE2OCF import __version__ as version
from CE2OCF.datamap.crawler import traverse_datamap
from CE2OCF.datamap.loaders import (
DEFAULT_CE_TO_OCF_DATAMAP_PREFERRED_STOCK_LEGEND_ONLY_PATH,
DEFAULT_CE_TO_OCF_PREFERRED_STOCK_CLASS_ONLY_PATH,
load_ce_to_ocf_issuer_datamap,
load_ce_to_ocf_stakeholder_datamap,
load_ce_to_ocf_stock_class_datamap,
load_ce_to_ocf_stock_legend_datamap,
load_ce_to_ocf_stock_plan_datamap,
load_ce_to_ocf_vested_issuances_datamap,
load_ce_to_ocf_vesting_issuances_datamap,
load_vesting_events_driving_enums_datamap,
load_vesting_schedule_driving_enums_datamap,
)
from CE2OCF.ocf.datamaps import (
FullyVestedStockIssuanceDataMap,
IssuerDataMap,
RepeatableStockholderDataMap,
StockClassDataMap,
StockLegendDataMap,
StockPlanDataMap,
VestingScheduleInputsDataMap,
VestingStockIssuanceDataMap,
)
from CE2OCF.ocf.generators.ocf_id_generators import (
generate_vesting_start_id,
)
from CE2OCF.ocf.generators.ocf_vesting_events import (
generate_vesting_start_event,
)
from CE2OCF.types.dictionaries import ContractExpressVarObj
from CE2OCF.types.enums import VestingTypesEnum
from CE2OCF.types.exceptions import VariableNotFound | 8,491 | raise ValueError("We only support COMMON or PREFERRED datamaps")
ocf_stock_legend = traverse_datamap(
stock_legend_datamap,
None,
ce_jsons,
value_overrides={"PARSER_VERSION": version, **value_overrides},
fail_on_missing_variable=fail_on_missing_variable,
)
# TODO - improve type checking to check for actual target OCF schema
assert isinstance(ocf_stock_legend, dict), f"Expected ocf_stock_legend to be dict, got {type(ocf_stock_legend)}"
return ocf_stock_legend
def parse_ocf_stakeholders_from_ce_json(
ce_jsons: list[ContractExpressVarObj],
post_processors: Optional[dict[str, Callable]] = None,
clear_old_post_processors: bool = True,
fail_on_missing_variable: bool = False,
custom_datamap_path: Optional[Path] = None,
value_overrides: Optional[dict[str, str]] = None,
) -> list[dict]:
"""
By default, loads our default ce to ocf stakeholder datamap (though you can provide a path your own JSON datamap)
and uses it to parse a list of valid OCF stakeholder objects from a list of ce_json objects.
Args:
ce_jsons: List of CE Jsons matching schema defined in ContractExpressVarObj
clear_old_post_processors: If True, unregister all existing handlers to RepeatableStockholderDataMap before
registering new post processors. Good idea generally to ensure no handlers
remain registered from elsewhere in your code base and is True by default.
post_processors (optional): A dictionary mapping stakeholder object data field names to functions which
you want to run on the parsed data - e.g. if your questionnaire has data that
needs to be formatted or parsed.
fail_on_missing_variable: Set to True if you want to get an error if any data fields are missing.
custom_datamap_path: If you want to use a custom datamap, provide path to json file
value_overrides: If provided, inject this variable value lookup into parser which will override anything in CE
Returns: List of valid ocf stakeholder objects
"""
if clear_old_post_processors:
RepeatableStockholderDataMap.clear_handlers()
if post_processors is not None:
RepeatableStockholderDataMap.register_handlers(post_processors)
if value_overrides is None:
value_overrides = {}
stakeholder_datamap = load_ce_to_ocf_stakeholder_datamap(custom_datamap_path)
stockholders_ocf = traverse_datamap(
stakeholder_datamap,
None,
ce_jsons,
value_overrides={"PARSER_VERSION": version, **value_overrides},
fail_on_missing_variable=fail_on_missing_variable,
)
# TODO - improve type checking to check for actual target OCF schema
assert isinstance(stockholders_ocf, list), (
f"Expected stockholders_ocf to be list of dicts, " f"got {type(stockholders_ocf)}"
)
return stockholders_ocf
def parse_ocf_stock_issuances_from_ce_json(
ce_jsons: list[ContractExpressVarObj],
fail_on_missing_variable: bool = False,
common_post_processors: Optional[dict[str, Callable]] = None,
preferred_post_processors: Optional[dict[str, Callable]] = None,
common_datamap_path: Optional[Path] = None,
preferred_datamap_path: Optional[Path] = None,
common_value_overrides: Optional[dict[str, str]] = None,
preferred_value_overrides: Optional[dict[str, str]] = None,
clear_old_post_processors: bool = True,
) -> list[dict]:
"""
Args:
ce_jsons:
fail_on_missing_variable:
common_post_processors:
preferred_post_processors:
common_datamap_path:
common_value_overrides:
preferred_datamap_path:
preferred_value_overrides:
clear_old_post_processors:
Returns:
"""
def drop_fully_vested_vest_term_id(val, ce_jsons) -> str:
"""
Raise a VariableNotFound exception if fully vested which will cause
the key to be dropped entirely.
Args:
val: Variable name
ce_jsons: List of ce jsons
Returns: Original value or, if fully vested, throw an error
"""
if val.split("/")[0] == "Fully Vested":
raise VariableNotFound
else:
return val
if common_value_overrides is None:
common_value_overrides = {}
if preferred_value_overrides is None:
preferred_value_overrides = {}
if clear_old_post_processors:
VestingStockIssuanceDataMap.clear_handlers()
|
def parse_ocf_issuer_from_ce_jsons(
ce_jsons: list[ContractExpressVarObj],
post_processors: Optional[dict[str, Callable]] = None,
fail_on_missing_variable: bool = False,
custom_datamap_path: Optional[Path] = None,
value_overrides: Optional[dict[str, str]] = None,
clear_old_post_processors: bool = True,
) -> dict:
"""
By default, loads our default ce to ocf issuer datamap (though you can provide a path your own JSON datamap) and
uses it to parse a valid OCF issuer object from a list of ce_json objects.
Args:
ce_jsons: List of CE Jsons matching schema defined in ContractExpressVarObj
post_processors (optional): A dictionary mapping stock class object data field names to functions which
you want to run on the parsed data - e.g. if your questionnaire has data that
needs to be formatted or parsed.
fail_on_missing_variable: Set to True if you want to get an error if any data fields are missing.
custom_datamap_path: If you want to use a custom datamap, provide path to json file
value_overrides: If provided, pass to underlying datamap crawler to override specified lookup values in dict
clear_old_post_processors: If True, unregister all handlers for IssuerDataMap before registering any provided
as post_processors
Returns: Valid ocf issuer json
"""
if clear_old_post_processors:
IssuerDataMap.clear_handlers()
if post_processors is not None:
IssuerDataMap.register_handlers(post_processors)
if value_overrides is None:
value_overrides = {}
issuer_datamap = load_ce_to_ocf_issuer_datamap(custom_datamap_path)
parsed_issuer_ocf = traverse_datamap(
issuer_datamap,
None,
ce_jsons,
value_overrides={"PARSER_VERSION": version, **value_overrides},
fail_on_missing_variable=fail_on_missing_variable,
)
# TODO - improve type checking to check for actual target OCF schema
assert isinstance(parsed_issuer_ocf, dict), f"Expected parsed_issuer_ocf to be dict, got {type(parsed_issuer_ocf)}"
return parsed_issuer_ocf
def parse_stock_plan_from_ce_jsons(
ce_jsons: list[ContractExpressVarObj],
post_processors: Optional[dict[str, Callable]] = None,
fail_on_missing_variable: bool = False,
custom_datamap_path: Optional[Path] = None,
value_overrides: Optional[dict[str, str]] = None,
clear_old_post_processors: bool = True,
) -> dict:
"""
By default, loads our default ce to ocf stock plan datamap (though you can provide a path your own JSON datamap)
and uses it to parse a valid OCF stock plan object from a list of ce_json objects.
:param ce_jsons:
:param post_processors:
:param fail_on_missing_variable:
:param custom_datamap_path:
:param value_overrides:
:param clear_old_post_processors:
:return: Valid OCF stock plan
"""
if clear_old_post_processors:
StockPlanDataMap.clear_handlers()
if post_processors is not None:
StockPlanDataMap.register_handlers(post_processors)
if value_overrides is None:
value_overrides = {}
stock_plan_datamap = load_ce_to_ocf_stock_plan_datamap(custom_datamap_path)
stock_plan_ocf = traverse_datamap(
stock_plan_datamap,
None,
ce_jsons,
value_overrides={"PARSER_VERSION": version, **value_overrides},
fail_on_missing_variable=fail_on_missing_variable,
)
# TODO - improve type checking to check for actual target OCF schema
assert isinstance(stock_plan_ocf, dict), f"Expected parsed_issuer_ocf to be dict, got {type(stock_plan_ocf)}"
return stock_plan_ocf
def parse_ocf_stock_class_from_ce_jsons(
ce_jsons: list[ContractExpressVarObj],
common_or_preferred: Literal["COMMON", "PREFERRED"] = "COMMON",
post_processors: Optional[dict[str, Callable]] = None,
fail_on_missing_variable: bool = False,
custom_datamap_path: Optional[Path] = None,
value_overrides: Optional[dict[str, str]] = None,
clear_old_post_processors: bool = True,
) -> dict:
"""
By default, loads our default ce to ocf common stock class datamap (though you can provide a path your own JSON
datamap) and uses it to parse a valid OCF issuer object from a list of ce_json objects. You can change the
common_or_preferred argument to "PREFERRED" to get a preferred stock class.
Args:
ce_jsons: List of CE Jsons matching schema defined in ContractExpressVarObj
common_or_preferred: Set to "COMMON" (default) to parse common stock and "PREFERRED" to parse preferred stock
post_processors (optional): A dictionary mapping stock class object data field names to functions which
you want to run on the parsed data - e.g. if your questionnaire has data that
needs to be formatted or parsed.
fail_on_missing_variable: Set to True if you want to get an error if any data fields are missing.
custom_datamap_path: If you want to use a custom datamap, provide path to json file
value_overrides: If provided, inject this into datamapper and look up values here first. If found, don't
check CE
Returns: Valid ocf stock class json
"""
if clear_old_post_processors:
StockClassDataMap.clear_handlers()
if post_processors is not None:
StockClassDataMap.register_handlers(post_processors)
if value_overrides is None:
value_overrides = {}
if common_or_preferred == "COMMON":
stock_class_datamap = load_ce_to_ocf_stock_class_datamap(custom_datamap_path)
elif common_or_preferred == "PREFERRED":
stock_class_datamap = load_ce_to_ocf_stock_class_datamap(
custom_datamap_path if custom_datamap_path else DEFAULT_CE_TO_OCF_PREFERRED_STOCK_CLASS_ONLY_PATH
)
else:
raise ValueError("We only support COMMON or PREFERRED datamaps")
stock_class_ocf = traverse_datamap(
stock_class_datamap,
None,
ce_jsons,
value_overrides={"PARSER_VERSION": version, **value_overrides},
fail_on_missing_variable=fail_on_missing_variable,
)
# TODO - improve type checking to check for actual target OCF schema
assert isinstance(stock_class_ocf, dict), f"Expected stock_class_ocf to be dict, got {type(stock_class_ocf)}"
return stock_class_ocf
def parse_ocf_stock_legend_from_ce_jsons(
ce_jsons: list[ContractExpressVarObj],
common_or_preferred: Literal["COMMON", "PREFERRED"] = "COMMON",
post_processors: Optional[dict[str, Callable]] = None,
fail_on_missing_variable: bool = False,
custom_datamap_path: Optional[Path] = None,
value_overrides: Optional[dict[str, str]] = None,
clear_old_post_processors: bool = True,
) -> dict:
"""
By default, loads our default ce to ocf common stock legend datamap (though you can provide a path your own JSON
datamap) and uses it to parse a valid OCF stock legend object from a list of ce_json objects. You can change the
common_or_preferred argument to "PREFERRED" to get a preferred stock legends.
Args:
ce_jsons: List of CE Jsons matching schema defined in ContractExpressVarObj
common_or_preferred: Set to "COMMON" (default) to parse common legends and "PREFERRED" to parse preferred legend
post_processors (optional): A dictionary mapping stock legend data field names to functions which
you want to run on the parsed data
fail_on_missing_variable: Set to True if you want to get an error if any data fields are missing.
custom_datamap_path: If you want to use a custom datamap, provide path to json file
value_overrides: If provided, inject this variable value lookup into parser which will override anything in CE
Returns: Valid ocf stock legend json
"""
if clear_old_post_processors:
StockLegendDataMap.clear_handlers()
if post_processors is not None:
StockLegendDataMap.register_handlers(post_processors)
if value_overrides is None:
value_overrides = {}
if common_or_preferred == "COMMON":
stock_legend_datamap = load_ce_to_ocf_stock_legend_datamap(custom_datamap_path)
elif common_or_preferred == "PREFERRED":
stock_legend_datamap = load_ce_to_ocf_stock_legend_datamap(
custom_datamap_path if custom_datamap_path else DEFAULT_CE_TO_OCF_DATAMAP_PREFERRED_STOCK_LEGEND_ONLY_PATH
)
else:
raise ValueError("We only support COMMON or PREFERRED datamaps")
ocf_stock_legend = traverse_datamap(
stock_legend_datamap,
None,
ce_jsons,
value_overrides={"PARSER_VERSION": version, **value_overrides},
fail_on_missing_variable=fail_on_missing_variable,
)
# TODO - improve type checking to check for actual target OCF schema
assert isinstance(ocf_stock_legend, dict), f"Expected ocf_stock_legend to be dict, got {type(ocf_stock_legend)}"
return ocf_stock_legend
def parse_ocf_stakeholders_from_ce_json(
ce_jsons: list[ContractExpressVarObj],
post_processors: Optional[dict[str, Callable]] = None,
clear_old_post_processors: bool = True,
fail_on_missing_variable: bool = False,
custom_datamap_path: Optional[Path] = None,
value_overrides: Optional[dict[str, str]] = None,
) -> list[dict]:
"""
By default, loads our default ce to ocf stakeholder datamap (though you can provide a path your own JSON datamap)
and uses it to parse a list of valid OCF stakeholder objects from a list of ce_json objects.
Args:
ce_jsons: List of CE Jsons matching schema defined in ContractExpressVarObj
clear_old_post_processors: If True, unregister all existing handlers to RepeatableStockholderDataMap before
registering new post processors. Good idea generally to ensure no handlers
remain registered from elsewhere in your code base and is True by default.
post_processors (optional): A dictionary mapping stakeholder object data field names to functions which
you want to run on the parsed data - e.g. if your questionnaire has data that
needs to be formatted or parsed.
fail_on_missing_variable: Set to True if you want to get an error if any data fields are missing.
custom_datamap_path: If you want to use a custom datamap, provide path to json file
value_overrides: If provided, inject this variable value lookup into parser which will override anything in CE
Returns: List of valid ocf stakeholder objects
"""
if clear_old_post_processors:
RepeatableStockholderDataMap.clear_handlers()
if post_processors is not None:
RepeatableStockholderDataMap.register_handlers(post_processors)
if value_overrides is None:
value_overrides = {}
stakeholder_datamap = load_ce_to_ocf_stakeholder_datamap(custom_datamap_path)
stockholders_ocf = traverse_datamap(
stakeholder_datamap,
None,
ce_jsons,
value_overrides={"PARSER_VERSION": version, **value_overrides},
fail_on_missing_variable=fail_on_missing_variable,
)
# TODO - improve type checking to check for actual target OCF schema
assert isinstance(stockholders_ocf, list), (
f"Expected stockholders_ocf to be list of dicts, " f"got {type(stockholders_ocf)}"
)
return stockholders_ocf
def parse_ocf_stock_issuances_from_ce_json(
ce_jsons: list[ContractExpressVarObj],
fail_on_missing_variable: bool = False,
common_post_processors: Optional[dict[str, Callable]] = None,
preferred_post_processors: Optional[dict[str, Callable]] = None,
common_datamap_path: Optional[Path] = None,
preferred_datamap_path: Optional[Path] = None,
common_value_overrides: Optional[dict[str, str]] = None,
preferred_value_overrides: Optional[dict[str, str]] = None,
clear_old_post_processors: bool = True,
) -> list[dict]:
"""
Args:
ce_jsons:
fail_on_missing_variable:
common_post_processors:
preferred_post_processors:
common_datamap_path:
common_value_overrides:
preferred_datamap_path:
preferred_value_overrides:
clear_old_post_processors:
Returns:
"""
def drop_fully_vested_vest_term_id(val, ce_jsons) -> str:
"""
Raise a VariableNotFound exception if fully vested which will cause
the key to be dropped entirely.
Args:
val: Variable name
ce_jsons: List of ce jsons
Returns: Original value or, if fully vested, throw an error
"""
if val.split("/")[0] == "Fully Vested":
raise VariableNotFound
else:
return val
if common_value_overrides is None:
common_value_overrides = {}
if preferred_value_overrides is None:
preferred_value_overrides = {}
if clear_old_post_processors:
VestingStockIssuanceDataMap.clear_handlers() | FullyVestedStockIssuanceDataMap.clear_handlers() | 13 | 2023-11-13 15:50:53+00:00 | 12k |
cyberark/ark-sdk-python | ark_sdk_python/cli_services/dpa/common/ark_dpa_base_policies_editor_service.py | [
{
"identifier": "ArkInquirerRender",
"path": "ark_sdk_python/args/ark_args_formatter.py",
"snippet": "class ArkInquirerRender(ConsoleRender):\n # pylint: disable=keyword-arg-before-vararg,protected-access\n def __init__(self, event_generator=None, *args, **kwargs):\n super().__init__(event_... | import difflib
import itertools
import os
import inquirer
from abc import ABC, abstractmethod
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from typing import Dict, Final, Generic, List, Optional, Tuple, TypeVar
from editor import EditorError
from ark_sdk_python.args.ark_args_formatter import ArkInquirerRender
from ark_sdk_python.auth.ark_isp_auth import ArkISPAuth
from ark_sdk_python.models import ArkServiceException
from ark_sdk_python.models.ark_profile import ArkProfile, ArkProfileLoader
from ark_sdk_python.models.cli_services.dpa.policies_editor.common import (
ArkDPABaseGeneratePolicy,
ArkDPACommitPolicies,
ArkDPAEditPolicies,
ArkDPAGetPoliciesStatus,
ArkDPALoadedPolicies,
ArkDPALoadPolicies,
ArkDPAPoliciesDiff,
ArkDPAPoliciesStatus,
ArkDPARemovePolicies,
ArkDPAResetPolicies,
ArkDPAViewPolicies,
)
from ark_sdk_python.models.services.dpa.policies.common import (
ArkDPABaseAddPolicy,
ArkDPABasePolicy,
ArkDPABasePolicyListItem,
ArkDPABaseUpdatePolicy,
ArkDPADeletePolicy,
ArkDPAGetPolicy,
)
from ark_sdk_python.services.ark_service import ArkService | 8,995 | inquirer.Checkbox(
'names',
f'Which {self._policies_family} policies would you like to view?',
choices=[p.policy_name for p in workspace_policies.values()],
)
],
render=ArkInquirerRender(),
)
if not answers:
return
policy_names = answers['names']
if not policy_names:
return
try:
if view_policies.unified:
inquirer.prompt(
[inquirer.Editor('views', f'Show all selected {self._policies_family} policies')],
answers={
'views': '\n\n\n'.join(
[f'# Policy [{policy_name}]\n{workspace_policies[policy_name].json(indent=4)}' for policy_name in policy_names]
)
},
render=ArkInquirerRender(),
)
else:
inquirer.prompt(
[inquirer.Editor(f'{policy_name}_view', f'Show [{policy_name}]') for policy_name in policy_names],
render=ArkInquirerRender(),
answers={f'{policy_name}_view': workspace_policies[policy_name].json(indent=4) for policy_name in policy_names},
)
except EditorError as ex:
self._logger.error(
f'An error occurred while trying to view the {self._policies_family} policies, '
f'you can view the policies at [{self.__policies_cache_dir}] [{str(ex)}]'
)
def reset_policies(self, reset_policy: ArkDPAResetPolicies) -> None:
"""
Resets local workspace policies.
When all policies are reset, all local policies are overwritten and deleted policies are removed.
Otherwise, the user can select which policies are reset.
This function does not alter newly generated uncommitted policies.
Args:
reset_policy (ArkDPAResetPolicies): _description_
"""
if reset_policy.all:
answers = inquirer.prompt(
[inquirer.Confirm('reset', message=f'Are you sure you want to reset all edited {self._policies_family} policies?')]
)
if not answers:
return
if answers['reset']:
self.load_policies(ArkDPALoadPolicies(override=True))
else:
policies_diff = self.__load_policies_diff()
removed_policies = self.__load_removed_policies_from_workspace()
if not policies_diff and not removed_policies:
return
policy_names = reset_policy.names
if not policy_names:
answers = inquirer.prompt(
[
inquirer.Checkbox(
'names',
f'Which {self._policies_family} policies would you like to reset?, press space to select',
choices=[p for p in policies_diff.keys() + removed_policies.keys()],
)
],
render=ArkInquirerRender(),
)
if not answers:
return
policy_names = answers['names']
policy_names = [p for p in policy_names if p in policies_diff or p in removed_policies]
for policy_name in policy_names:
policy_path = Path(self.__policies_cache_dir) / (policy_name + '.json')
if policy_name in policies_diff:
policy_path.write_text(policies_diff[policy_name][1].json(indent=4))
elif policy_name in removed_policies:
policy_path.write_text(removed_policies[policy_name].json(indent=4))
(Path(self.__policies_cache_dir) / (policy_name + '.json.removed')).unlink(missing_ok=True)
def generate_policy(self, generate_policy: GeneratePolicyType) -> None:
"""
Generates a new policy from a template and the user's parameters.
The user is prompted for the parameters when they are not specified in the CLI.
After policy's parameters are defined, the policy is generates in memory and can bee edited.
The new policy is saved locally until it is committed.
Args:
generate_policy (GeneratePolicyType): _description_
"""
workspace_policies = self.__load_existing_policies_from_workspace()
workspace_policies.update(self.__load_generated_policies_from_workspace())
policy = self._generate_policy(generate_policy, workspace_policies)
policy_path = Path(self.__policies_cache_dir) / (policy.policy_name + '.json.generated')
# Let the user edit the generated policy
if not generate_policy.disable_edit:
try:
answers = inquirer.prompt(
[
inquirer.Editor(
'policy_editor',
f'Newly {self._policies_family} policy is generated and ready to be edited, once edited, it will be saved to the local workspace',
)
],
render=ArkInquirerRender(),
answers={'policy_editor': policy.json(indent=4, exclude_none=True)},
)
if not answers:
return
policy = self.__policy_type.parse_raw(answers['policy_editor'])
except EditorError as ex:
self._logger.error(
f'An error occurred while trying to edit the {self._policies_family} policy, '
f'the policy will be saved to [{policy_path}] and can be edited manually [{str(ex)}]'
)
policy_path.write_text(policy.json(indent=4))
|
MAX_LINE_DIFF: Final[int] = 100000
PolicyType = TypeVar('PolicyType', bound=ArkDPABasePolicy)
PolicyListItemType = TypeVar('PolicyListItemType', bound=ArkDPABasePolicyListItem)
AddPolicyType = TypeVar('AddPolicyType', bound=ArkDPABaseAddPolicy)
UpdatePolicyType = TypeVar('UpdatePolicyType', bound=ArkDPABaseUpdatePolicy)
GeneratePolicyType = TypeVar('GeneratePolicyType', bound=ArkDPABaseGeneratePolicy)
class ArkDPABasePoliciesEditorService(
ArkService, ABC, Generic[PolicyType, PolicyListItemType, AddPolicyType, UpdatePolicyType, GeneratePolicyType]
):
def __init__(
self,
policy_type: PolicyType,
add_policy_type: AddPolicyType,
update_policy_type: UpdatePolicyType,
isp_auth: ArkISPAuth,
policies_family: str,
tenant_id: str,
policies_cache_dir: Optional[str] = None,
profile: Optional[ArkProfile] = None,
) -> None:
super().__init__(isp_auth)
profile = profile or ArkProfileLoader.load_default_profile()
self._policies_family = policies_family
self.__policies_cache_dir = Path(policies_cache_dir or Path.home() / '.ark_cache' / 'profiles' / profile.profile_name / tenant_id)
if not policies_cache_dir and 'ARK_DPA_POLICIES_EDITOR_FOLDER' in os.environ:
self.__policies_cache_dir = Path(os.environ['ARK_DPA_POLICIES_EDITOR_FOLDER'])
self.__policies_cache_dir = self.__policies_cache_dir / policies_family
self.__policies_cache_dir.mkdir(exist_ok=True, parents=True)
self.__policy_type = policy_type
self.__add_policy_type = add_policy_type
self.__update_policy_type = update_policy_type
@abstractmethod
def _policy(self, get_policy: ArkDPAGetPolicy) -> PolicyType:
pass
@abstractmethod
def _list_policies(self) -> List[PolicyListItemType]:
pass
@abstractmethod
def _add_policy(self, add_policy: AddPolicyType) -> PolicyType:
pass
@abstractmethod
def _update_policy(self, update_policy: UpdatePolicyType) -> PolicyType:
pass
@abstractmethod
def _delete_policy(self, delete_policy: ArkDPADeletePolicy) -> None:
pass
@abstractmethod
def _generate_policy(self, generate_policy: GeneratePolicyType, workspace_policies: List[PolicyType]) -> PolicyType:
pass
def __load_policy_diff(self, workspace_policy: PolicyType) -> Optional[Tuple[PolicyType, PolicyType]]:
remote_policy = self._policy(ArkDPAGetPolicy(policy_id=str(workspace_policy.policy_id)))
if remote_policy != workspace_policy:
return (workspace_policy, remote_policy)
return None
def __load_policies_diff(self) -> Dict[str, Tuple[PolicyType, PolicyType]]:
workspace_policies = self.__load_existing_policies_from_workspace()
with ThreadPoolExecutor() as executor:
remote_policies = {
p[0].policy_name: p for p in executor.map(self.__load_policy_diff, workspace_policies.values()) if p is not None
}
return remote_policies
def __load_policies_from_workspace_by_suffix(self, suffix: str = '') -> Dict[str, PolicyType]:
p = Path(self.__policies_cache_dir).glob(f'*.json{suffix}')
policies_files = [x for x in p if x.is_file() and x.suffix == suffix or '.json']
policies = {}
for f in policies_files:
policy = self.__policy_type.parse_file(f)
policies[policy.policy_name] = policy
return policies
def __load_removed_policies_from_workspace(self) -> Dict[str, PolicyType]:
return self.__load_policies_from_workspace_by_suffix('.removed')
def __load_generated_policies_from_workspace(self) -> Dict[str, PolicyType]:
return self.__load_policies_from_workspace_by_suffix('.generated')
def __load_existing_policies_from_workspace(self) -> Dict[str, PolicyType]:
return self.__load_policies_from_workspace_by_suffix()
def __load_policy_to_workspace(self, policy: PolicyListItemType, override: bool) -> Optional[PolicyType]:
policy_data = self._policy(ArkDPAGetPolicy(policy_id=policy.policy_id))
policy_path = Path(self.__policies_cache_dir) / (policy_data.policy_name + '.json')
if policy_path.exists():
existing_data = self.__policy_type.parse_raw(policy_path.read_text())
if existing_data != policy_data:
if not override:
return policy_data
if not policy_data.policy_id:
policy_data.policy_id = policy.policy_id
policy_path.write_text(policy_data.json(indent=4))
(Path(self.__policies_cache_dir) / (policy_data.policy_name + '.json.removed')).unlink(missing_ok=True)
def load_policies(self, load_policies: ArkDPALoadPolicies) -> ArkDPALoadedPolicies:
"""
Loads all remote policies into the local workspace.
The user is asked whether to overwrite existing policies that were edited either locally or remotely.
When default overwrite is enabled, existing policies are overwritten without prompts.
Args:
load_policies (ArkDPALoadPolicies): _description_
Returns:
ArkDPALoadedPolicies: _description_
"""
policies = self._list_policies()
policies_to_query: Dict[str, PolicyType] = []
with ThreadPoolExecutor() as executor:
policies_to_query = {
p.policy_name: p
for p in executor.map(lambda p: self.__load_policy_to_workspace(p, load_policies.override), policies)
if p is not None
}
# Build the query editor to ask the user
policies_to_override = []
if policies_to_query:
answers = inquirer.prompt(
[
inquirer.Checkbox(
'override',
message=f'Conflicts detected, please choose if you wish to override local {self._policies_family} policies or leave them as is',
choices=[p.policy_name for p in policies_to_query.values()],
)
],
render=ArkInquirerRender(),
)
if not answers:
return
policies_to_override = answers['override']
for policy_name in policies_to_override:
policy_path = Path(self.__policies_cache_dir) / (policy_name + '.json')
if policy_path.exists() and policy_name in policies_to_query:
policy_path.write_text(policies_to_query[policy_name].json(indent=4))
return ArkDPALoadedPolicies(
loaded_path=str(self.__policies_cache_dir),
overall_policies_count=len(policies),
loaded_policies_count=len(policies) - len(policies_to_query),
overriden_policies_count=len(policies_to_override),
untouched_policies_count=len(policies_to_query) - len(policies_to_override),
)
def edit_policies(self, edit_policies: ArkDPAEditPolicies) -> None:
"""
Edits the set of specified policies one at a time, either via the CLI or the default OS editor.
Edited policies are only saved locally until they are committed.
Args:
edit_policies (ArkDPAEditPolicies): _description_
Raises:
ArkServiceException: _description_
"""
workspace_policies = self.__load_existing_policies_from_workspace()
workspace_policies.update(self.__load_generated_policies_from_workspace())
if not workspace_policies:
raise ArkServiceException(
f'No {self._policies_family} policies to edit in the workspace, please load the policies or generate a new one'
)
policy_names = edit_policies.names
if not policy_names:
answers = inquirer.prompt(
[
inquirer.Checkbox(
'names',
f'Which {self._policies_family} policies would you like to edit?, press space to select',
choices=[p.policy_name for p in workspace_policies.values()],
)
],
render=ArkInquirerRender(),
)
if not answers:
return
policy_names = answers['names']
try:
answers = inquirer.prompt(
[
inquirer.Editor(f'{name}_edit', message=f'Chosen {self._policies_family} policy [{name}] is about to be edited')
for name in policy_names
],
render=ArkInquirerRender(),
answers={f'{name}_edit': workspace_policies[name].json(indent=4) for name in policy_names},
)
for name in policy_names:
policy = self.__policy_type.parse_raw(answers[f'{name}_edit'])
for path in [
Path(self.__policies_cache_dir) / (name + '.json'),
Path(self.__policies_cache_dir) / (name + '.json.generated'),
]:
if path.exists():
path.write_text(policy.json(indent=4))
break
except EditorError as ex:
self._logger.error(
f'An error occurred while trying to edit {self._policies_family} policies, '
f'you can edit the policies at [{self.__policies_cache_dir}] [{str(ex)}]'
)
def remove_policies(self, remove_policies: ArkDPARemovePolicies) -> None:
"""
Removes one or more policies from the local workspace.
Until changes are committed, removing a remote policy only appends the `.deleted` indication to its name.
After committing the changes, the policies are deleted both locally and remotely.
New, uncommitted policies are deleted locally after the user consents.
Args:
remove_policies (ArkDPARemovePolicies): _description_
Raises:
ArkServiceException: _description_
"""
workspace_policies = self.__load_existing_policies_from_workspace()
workspace_policies.update(self.__load_generated_policies_from_workspace())
if not workspace_policies:
raise ArkServiceException(
f'No {self._policies_family} policies to remove in the workspace, please load the policies or generate a new one'
)
policy_names = remove_policies.names
if not policy_names:
answers = inquirer.prompt(
[
inquirer.Checkbox(
'names',
f'Which {self._policies_family} policies would you like to remove?, press space to select',
choices=[p.policy_name for p in workspace_policies.values()],
)
],
render=ArkInquirerRender(),
)
if not answers:
return
policy_names = answers['names']
for policy_name in policy_names:
for path in [
Path(self.__policies_cache_dir) / (policy_name + '.json'),
Path(self.__policies_cache_dir) / (policy_name + '.json.generated'),
]:
if path.exists():
if path.suffix == '.json':
path.rename(Path(self.__policies_cache_dir) / (policy_name + '.json.removed'))
else:
answers = inquirer.prompt(
[
inquirer.Confirm(
'remove',
message=f'Are you sure you want to remove local {self._policies_family} policy [{policy_name}]?, removing an uncommitted local policy cannot be reverted',
)
],
render=ArkInquirerRender(),
)
if not answers:
return
if answers['remove']:
path.unlink(missing_ok=True)
def view_policies(self, view_policies: ArkDPAViewPolicies) -> None:
"""
Allows the user to view one or more policies either together or individually, as defined in the CLI user prompt.
Policies are viewed in the machine's default editor (both existing policies and newly generated policies).
Args:
view_policies (ArkDPAViewPolicies): _description_
"""
workspace_policies = self.__load_existing_policies_from_workspace()
workspace_policies.update(self.__load_generated_policies_from_workspace())
policy_names = view_policies.names
if not policy_names:
answers = inquirer.prompt(
[
inquirer.Checkbox(
'names',
f'Which {self._policies_family} policies would you like to view?',
choices=[p.policy_name for p in workspace_policies.values()],
)
],
render=ArkInquirerRender(),
)
if not answers:
return
policy_names = answers['names']
if not policy_names:
return
try:
if view_policies.unified:
inquirer.prompt(
[inquirer.Editor('views', f'Show all selected {self._policies_family} policies')],
answers={
'views': '\n\n\n'.join(
[f'# Policy [{policy_name}]\n{workspace_policies[policy_name].json(indent=4)}' for policy_name in policy_names]
)
},
render=ArkInquirerRender(),
)
else:
inquirer.prompt(
[inquirer.Editor(f'{policy_name}_view', f'Show [{policy_name}]') for policy_name in policy_names],
render=ArkInquirerRender(),
answers={f'{policy_name}_view': workspace_policies[policy_name].json(indent=4) for policy_name in policy_names},
)
except EditorError as ex:
self._logger.error(
f'An error occurred while trying to view the {self._policies_family} policies, '
f'you can view the policies at [{self.__policies_cache_dir}] [{str(ex)}]'
)
def reset_policies(self, reset_policy: ArkDPAResetPolicies) -> None:
"""
Resets local workspace policies.
When all policies are reset, all local policies are overwritten and deleted policies are removed.
Otherwise, the user can select which policies are reset.
This function does not alter newly generated uncommitted policies.
Args:
reset_policy (ArkDPAResetPolicies): _description_
"""
if reset_policy.all:
answers = inquirer.prompt(
[inquirer.Confirm('reset', message=f'Are you sure you want to reset all edited {self._policies_family} policies?')]
)
if not answers:
return
if answers['reset']:
self.load_policies(ArkDPALoadPolicies(override=True))
else:
policies_diff = self.__load_policies_diff()
removed_policies = self.__load_removed_policies_from_workspace()
if not policies_diff and not removed_policies:
return
policy_names = reset_policy.names
if not policy_names:
answers = inquirer.prompt(
[
inquirer.Checkbox(
'names',
f'Which {self._policies_family} policies would you like to reset?, press space to select',
choices=[p for p in policies_diff.keys() + removed_policies.keys()],
)
],
render=ArkInquirerRender(),
)
if not answers:
return
policy_names = answers['names']
policy_names = [p for p in policy_names if p in policies_diff or p in removed_policies]
for policy_name in policy_names:
policy_path = Path(self.__policies_cache_dir) / (policy_name + '.json')
if policy_name in policies_diff:
policy_path.write_text(policies_diff[policy_name][1].json(indent=4))
elif policy_name in removed_policies:
policy_path.write_text(removed_policies[policy_name].json(indent=4))
(Path(self.__policies_cache_dir) / (policy_name + '.json.removed')).unlink(missing_ok=True)
def generate_policy(self, generate_policy: GeneratePolicyType) -> None:
"""
Generates a new policy from a template and the user's parameters.
The user is prompted for the parameters when they are not specified in the CLI.
After policy's parameters are defined, the policy is generates in memory and can bee edited.
The new policy is saved locally until it is committed.
Args:
generate_policy (GeneratePolicyType): _description_
"""
workspace_policies = self.__load_existing_policies_from_workspace()
workspace_policies.update(self.__load_generated_policies_from_workspace())
policy = self._generate_policy(generate_policy, workspace_policies)
policy_path = Path(self.__policies_cache_dir) / (policy.policy_name + '.json.generated')
# Let the user edit the generated policy
if not generate_policy.disable_edit:
try:
answers = inquirer.prompt(
[
inquirer.Editor(
'policy_editor',
f'Newly {self._policies_family} policy is generated and ready to be edited, once edited, it will be saved to the local workspace',
)
],
render=ArkInquirerRender(),
answers={'policy_editor': policy.json(indent=4, exclude_none=True)},
)
if not answers:
return
policy = self.__policy_type.parse_raw(answers['policy_editor'])
except EditorError as ex:
self._logger.error(
f'An error occurred while trying to edit the {self._policies_family} policy, '
f'the policy will be saved to [{policy_path}] and can be edited manually [{str(ex)}]'
)
policy_path.write_text(policy.json(indent=4))
| def policies_diff(self, policies_diff: ArkDPAPoliciesDiff) -> None: | 11 | 2023-11-13 09:24:31+00:00 | 12k |
mohenghui/detectAuto_v8 | ultralytics/models/sam/build.py | [
{
"identifier": "attempt_download_asset",
"path": "ultralytics/utils/downloads.py",
"snippet": "def attempt_download_asset(file, repo='ultralytics/assets', release='v0.0.0'):\n \"\"\"\n Attempt file download from GitHub release assets if not found locally.\n\n release = 'latest', 'v6.2', etc.\n... | from functools import partial
from ultralytics.utils.downloads import attempt_download_asset
from .modules.decoders import MaskDecoder
from .modules.encoders import ImageEncoderViT, PromptEncoder
from .modules.sam import Sam
from .modules.tiny_encoder import TinyViT
from .modules.transformer import TwoWayTransformer
import torch | 9,754 | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
def build_sam_vit_h(checkpoint=None):
"""Build and return a Segment Anything Model (SAM) h-size model."""
return _build_sam(
encoder_embed_dim=1280,
encoder_depth=32,
encoder_num_heads=16,
encoder_global_attn_indexes=[7, 15, 23, 31],
checkpoint=checkpoint,
)
def build_sam_vit_l(checkpoint=None):
"""Build and return a Segment Anything Model (SAM) l-size model."""
return _build_sam(
encoder_embed_dim=1024,
encoder_depth=24,
encoder_num_heads=16,
encoder_global_attn_indexes=[5, 11, 17, 23],
checkpoint=checkpoint,
)
def build_sam_vit_b(checkpoint=None):
"""Build and return a Segment Anything Model (SAM) b-size model."""
return _build_sam(
encoder_embed_dim=768,
encoder_depth=12,
encoder_num_heads=12,
encoder_global_attn_indexes=[2, 5, 8, 11],
checkpoint=checkpoint,
)
def build_mobile_sam(checkpoint=None):
"""Build and return Mobile Segment Anything Model (Mobile-SAM)."""
return _build_sam(
encoder_embed_dim=[64, 128, 160, 320],
encoder_depth=[2, 2, 6, 2],
encoder_num_heads=[2, 4, 5, 10],
encoder_global_attn_indexes=None,
mobile_sam=True,
checkpoint=checkpoint,
)
def _build_sam(encoder_embed_dim,
encoder_depth,
encoder_num_heads,
encoder_global_attn_indexes,
checkpoint=None,
mobile_sam=False):
"""Builds the selected SAM model architecture."""
prompt_embed_dim = 256
image_size = 1024
vit_patch_size = 16
image_embedding_size = image_size // vit_patch_size
image_encoder = (TinyViT(
img_size=1024,
in_chans=3,
num_classes=1000,
embed_dims=encoder_embed_dim,
depths=encoder_depth,
num_heads=encoder_num_heads,
window_sizes=[7, 7, 14, 7],
mlp_ratio=4.0,
drop_rate=0.0,
drop_path_rate=0.0,
use_checkpoint=False,
mbconv_expand_ratio=4.0,
local_conv_size=3,
layer_lr_decay=0.8,
) if mobile_sam else ImageEncoderViT(
depth=encoder_depth,
embed_dim=encoder_embed_dim,
img_size=image_size,
mlp_ratio=4,
norm_layer=partial(torch.nn.LayerNorm, eps=1e-6),
num_heads=encoder_num_heads,
patch_size=vit_patch_size,
qkv_bias=True,
use_rel_pos=True,
global_attn_indexes=encoder_global_attn_indexes,
window_size=14,
out_chans=prompt_embed_dim,
))
sam = Sam(
image_encoder=image_encoder,
prompt_encoder=PromptEncoder(
embed_dim=prompt_embed_dim,
image_embedding_size=(image_embedding_size, image_embedding_size),
input_image_size=(image_size, image_size),
mask_in_chans=16,
),
mask_decoder=MaskDecoder(
num_multimask_outputs=3,
transformer=TwoWayTransformer(
depth=2,
embedding_dim=prompt_embed_dim,
mlp_dim=2048,
num_heads=8,
),
transformer_dim=prompt_embed_dim,
iou_head_depth=3,
iou_head_hidden_dim=256,
),
pixel_mean=[123.675, 116.28, 103.53],
pixel_std=[58.395, 57.12, 57.375],
)
if checkpoint is not None:
| # Ultralytics YOLO 🚀, AGPL-3.0 license
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
def build_sam_vit_h(checkpoint=None):
"""Build and return a Segment Anything Model (SAM) h-size model."""
return _build_sam(
encoder_embed_dim=1280,
encoder_depth=32,
encoder_num_heads=16,
encoder_global_attn_indexes=[7, 15, 23, 31],
checkpoint=checkpoint,
)
def build_sam_vit_l(checkpoint=None):
"""Build and return a Segment Anything Model (SAM) l-size model."""
return _build_sam(
encoder_embed_dim=1024,
encoder_depth=24,
encoder_num_heads=16,
encoder_global_attn_indexes=[5, 11, 17, 23],
checkpoint=checkpoint,
)
def build_sam_vit_b(checkpoint=None):
"""Build and return a Segment Anything Model (SAM) b-size model."""
return _build_sam(
encoder_embed_dim=768,
encoder_depth=12,
encoder_num_heads=12,
encoder_global_attn_indexes=[2, 5, 8, 11],
checkpoint=checkpoint,
)
def build_mobile_sam(checkpoint=None):
"""Build and return Mobile Segment Anything Model (Mobile-SAM)."""
return _build_sam(
encoder_embed_dim=[64, 128, 160, 320],
encoder_depth=[2, 2, 6, 2],
encoder_num_heads=[2, 4, 5, 10],
encoder_global_attn_indexes=None,
mobile_sam=True,
checkpoint=checkpoint,
)
def _build_sam(encoder_embed_dim,
encoder_depth,
encoder_num_heads,
encoder_global_attn_indexes,
checkpoint=None,
mobile_sam=False):
"""Builds the selected SAM model architecture."""
prompt_embed_dim = 256
image_size = 1024
vit_patch_size = 16
image_embedding_size = image_size // vit_patch_size
image_encoder = (TinyViT(
img_size=1024,
in_chans=3,
num_classes=1000,
embed_dims=encoder_embed_dim,
depths=encoder_depth,
num_heads=encoder_num_heads,
window_sizes=[7, 7, 14, 7],
mlp_ratio=4.0,
drop_rate=0.0,
drop_path_rate=0.0,
use_checkpoint=False,
mbconv_expand_ratio=4.0,
local_conv_size=3,
layer_lr_decay=0.8,
) if mobile_sam else ImageEncoderViT(
depth=encoder_depth,
embed_dim=encoder_embed_dim,
img_size=image_size,
mlp_ratio=4,
norm_layer=partial(torch.nn.LayerNorm, eps=1e-6),
num_heads=encoder_num_heads,
patch_size=vit_patch_size,
qkv_bias=True,
use_rel_pos=True,
global_attn_indexes=encoder_global_attn_indexes,
window_size=14,
out_chans=prompt_embed_dim,
))
sam = Sam(
image_encoder=image_encoder,
prompt_encoder=PromptEncoder(
embed_dim=prompt_embed_dim,
image_embedding_size=(image_embedding_size, image_embedding_size),
input_image_size=(image_size, image_size),
mask_in_chans=16,
),
mask_decoder=MaskDecoder(
num_multimask_outputs=3,
transformer=TwoWayTransformer(
depth=2,
embedding_dim=prompt_embed_dim,
mlp_dim=2048,
num_heads=8,
),
transformer_dim=prompt_embed_dim,
iou_head_depth=3,
iou_head_hidden_dim=256,
),
pixel_mean=[123.675, 116.28, 103.53],
pixel_std=[58.395, 57.12, 57.375],
)
if checkpoint is not None: | checkpoint = attempt_download_asset(checkpoint) | 0 | 2023-11-16 12:49:59+00:00 | 12k |
i-super/Saleor | saleor/graphql/account/tests/mutations/staff/test_customer_delete.py | [
{
"identifier": "WebhookEventAsyncType",
"path": "saleor/webhook/event_types.py",
"snippet": "class WebhookEventAsyncType:\n ANY = \"any_events\"\n\n ACCOUNT_CONFIRMATION_REQUESTED = \"account_confirmation_requested\"\n ACCOUNT_EMAIL_CHANGED = \"account_email_changed\"\n ACCOUNT_CHANGE_EMAIL... | from functools import partial
from unittest.mock import ANY, Mock, patch
from django.core.exceptions import ValidationError
from django.utils.functional import SimpleLazyObject
from freezegun import freeze_time
from ......webhook.event_types import WebhookEventAsyncType
from .....tests.utils import get_graphql_content
from ....mutations.staff import CustomerDelete
import graphene
import pytest | 7,788 | externalReference
}
}
}
"""
@patch("saleor.account.signals.delete_from_storage_task.delay")
@patch("saleor.graphql.account.mutations.base.account_events.customer_deleted_event")
def test_customer_delete(
mocked_deletion_event,
delete_from_storage_task_mock,
staff_api_client,
staff_user,
customer_user,
image,
permission_manage_users,
media_root,
):
query = CUSTOMER_DELETE_MUTATION
customer_id = graphene.Node.to_global_id("User", customer_user.pk)
customer_user.avatar = image
customer_user.save(update_fields=["avatar"])
variables = {"id": customer_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_users]
)
content = get_graphql_content(response)
data = content["data"]["customerDelete"]
assert data["errors"] == []
assert data["user"]["id"] == customer_id
# Ensure the customer was properly deleted
# and any related event was properly triggered
mocked_deletion_event.assert_called_once_with(
staff_user=staff_user, app=None, deleted_count=1
)
delete_from_storage_task_mock.assert_called_once_with(customer_user.avatar.name)
@freeze_time("2018-05-31 12:00:01")
@patch("saleor.plugins.webhook.plugin.get_webhooks_for_event")
@patch("saleor.plugins.webhook.plugin.trigger_webhooks_async")
def test_customer_delete_trigger_webhook(
mocked_webhook_trigger,
mocked_get_webhooks_for_event,
any_webhook,
staff_api_client,
customer_user,
permission_manage_users,
settings,
):
# given
mocked_get_webhooks_for_event.return_value = [any_webhook]
settings.PLUGINS = ["saleor.plugins.webhook.plugin.WebhookPlugin"]
customer_id = graphene.Node.to_global_id("User", customer_user.pk)
variables = {"id": customer_id}
# when
response = staff_api_client.post_graphql(
CUSTOMER_DELETE_MUTATION, variables, permissions=[permission_manage_users]
)
content = get_graphql_content(response)
data = content["data"]["customerDelete"]
# then
assert data["errors"] == []
assert data["user"]["id"] == customer_id
mocked_webhook_trigger.assert_called_once_with(
None,
WebhookEventAsyncType.CUSTOMER_DELETED,
[any_webhook],
customer_user,
SimpleLazyObject(lambda: staff_api_client.user),
legacy_data_generator=ANY,
)
assert isinstance(
mocked_webhook_trigger.call_args.kwargs["legacy_data_generator"], partial
)
@patch("saleor.account.signals.delete_from_storage_task.delay")
@patch("saleor.graphql.account.mutations.base.account_events.customer_deleted_event")
def test_customer_delete_by_app(
mocked_deletion_event,
delete_from_storage_task_mock,
app_api_client,
app,
customer_user,
image,
permission_manage_users,
media_root,
):
query = CUSTOMER_DELETE_MUTATION
customer_id = graphene.Node.to_global_id("User", customer_user.pk)
customer_user.avatar = image
customer_user.save(update_fields=["avatar"])
variables = {"id": customer_id}
response = app_api_client.post_graphql(
query, variables, permissions=[permission_manage_users]
)
content = get_graphql_content(response)
data = content["data"]["customerDelete"]
assert data["errors"] == []
assert data["user"]["id"] == customer_id
# Ensure the customer was properly deleted
# and any related event was properly triggered
assert mocked_deletion_event.call_count == 1
args, kwargs = mocked_deletion_event.call_args
assert kwargs["deleted_count"] == 1
assert kwargs["staff_user"] is None
assert kwargs["app"] == app
delete_from_storage_task_mock.assert_called_once_with(customer_user.avatar.name)
def test_customer_delete_errors(customer_user, admin_user, staff_user):
info = Mock(context=Mock(user=admin_user))
with pytest.raises(ValidationError) as e:
|
CUSTOMER_DELETE_MUTATION = """
mutation CustomerDelete($id: ID, $externalReference: String) {
customerDelete(id: $id, externalReference: $externalReference) {
errors {
field
message
}
user {
id
externalReference
}
}
}
"""
@patch("saleor.account.signals.delete_from_storage_task.delay")
@patch("saleor.graphql.account.mutations.base.account_events.customer_deleted_event")
def test_customer_delete(
mocked_deletion_event,
delete_from_storage_task_mock,
staff_api_client,
staff_user,
customer_user,
image,
permission_manage_users,
media_root,
):
query = CUSTOMER_DELETE_MUTATION
customer_id = graphene.Node.to_global_id("User", customer_user.pk)
customer_user.avatar = image
customer_user.save(update_fields=["avatar"])
variables = {"id": customer_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_users]
)
content = get_graphql_content(response)
data = content["data"]["customerDelete"]
assert data["errors"] == []
assert data["user"]["id"] == customer_id
# Ensure the customer was properly deleted
# and any related event was properly triggered
mocked_deletion_event.assert_called_once_with(
staff_user=staff_user, app=None, deleted_count=1
)
delete_from_storage_task_mock.assert_called_once_with(customer_user.avatar.name)
@freeze_time("2018-05-31 12:00:01")
@patch("saleor.plugins.webhook.plugin.get_webhooks_for_event")
@patch("saleor.plugins.webhook.plugin.trigger_webhooks_async")
def test_customer_delete_trigger_webhook(
mocked_webhook_trigger,
mocked_get_webhooks_for_event,
any_webhook,
staff_api_client,
customer_user,
permission_manage_users,
settings,
):
# given
mocked_get_webhooks_for_event.return_value = [any_webhook]
settings.PLUGINS = ["saleor.plugins.webhook.plugin.WebhookPlugin"]
customer_id = graphene.Node.to_global_id("User", customer_user.pk)
variables = {"id": customer_id}
# when
response = staff_api_client.post_graphql(
CUSTOMER_DELETE_MUTATION, variables, permissions=[permission_manage_users]
)
content = get_graphql_content(response)
data = content["data"]["customerDelete"]
# then
assert data["errors"] == []
assert data["user"]["id"] == customer_id
mocked_webhook_trigger.assert_called_once_with(
None,
WebhookEventAsyncType.CUSTOMER_DELETED,
[any_webhook],
customer_user,
SimpleLazyObject(lambda: staff_api_client.user),
legacy_data_generator=ANY,
)
assert isinstance(
mocked_webhook_trigger.call_args.kwargs["legacy_data_generator"], partial
)
@patch("saleor.account.signals.delete_from_storage_task.delay")
@patch("saleor.graphql.account.mutations.base.account_events.customer_deleted_event")
def test_customer_delete_by_app(
mocked_deletion_event,
delete_from_storage_task_mock,
app_api_client,
app,
customer_user,
image,
permission_manage_users,
media_root,
):
query = CUSTOMER_DELETE_MUTATION
customer_id = graphene.Node.to_global_id("User", customer_user.pk)
customer_user.avatar = image
customer_user.save(update_fields=["avatar"])
variables = {"id": customer_id}
response = app_api_client.post_graphql(
query, variables, permissions=[permission_manage_users]
)
content = get_graphql_content(response)
data = content["data"]["customerDelete"]
assert data["errors"] == []
assert data["user"]["id"] == customer_id
# Ensure the customer was properly deleted
# and any related event was properly triggered
assert mocked_deletion_event.call_count == 1
args, kwargs = mocked_deletion_event.call_args
assert kwargs["deleted_count"] == 1
assert kwargs["staff_user"] is None
assert kwargs["app"] == app
delete_from_storage_task_mock.assert_called_once_with(customer_user.avatar.name)
def test_customer_delete_errors(customer_user, admin_user, staff_user):
info = Mock(context=Mock(user=admin_user))
with pytest.raises(ValidationError) as e: | CustomerDelete.clean_instance(info, staff_user) | 2 | 2023-11-13 05:00:35+00:00 | 12k |
Aues6uen11Z/Zafkiel | zafkiel/ui/ui.py | [
{
"identifier": "ImageTemplate",
"path": "zafkiel/device/template.py",
"snippet": "class ImageTemplate(Template):\n def __init__(\n self,\n filename: str,\n record_pos: tuple = None,\n keyword: Keyword = None,\n threshold: float = None,\n ... | from zafkiel.device.template import ImageTemplate as Template
from zafkiel.logger import logger
from zafkiel.device.api import API
from zafkiel.ocr.ocr import Ocr
from zafkiel.ui.page import Page
from zafkiel.decorator import run_once
from zafkiel.exception import NotRunningError, PageUnknownError, ScriptError
from zafkiel.timer import Timer
from zafkiel.ui.switch import Switch | 9,333 |
class UI(API):
"""
Processing interface related functions.
Main code comes from https://github.com/LmeSzinc/StarRailCopilot/blob/master/tasks/base/ui.py
and https://github.com/LmeSzinc/StarRailCopilot/blob/master/module/ui/switch.py
"""
# Make ui_current mutable so that it can be shared among subclasses of the UI class.
ui_current: dict = {'page': None}
popup_list: list = []
def ui_switch_appear(self, switch: Switch) -> bool:
"""
Args:
switch:
"""
if self.ui_get_current_page().switch != switch:
return False
for data in switch.state_list:
if self.exists(data['check_button']):
return True
return False
def ui_get_current_state(self, switch: Switch) -> str:
"""
Args:
switch:
Returns:
state name or 'unknown'.
"""
if self.ui_current['page'].switch != switch:
logger.warning(f"{self.ui_current['page']} does not have {switch}")
return 'unknown'
for data in switch.state_list:
if self.exists(data['check_button']):
return data['state']
return 'unknown'
def ui_page_appear(self, page: Page, timeout: float = 0) -> bool or tuple:
"""
Args:
page:
timeout: Seconds to find.
Returns:
If found, return tuple of (x, y), else return False.
"""
return self.exists(page.check_button, timeout)
def ui_get_current_page(self):
"""
Returns:
Page:
Raises:
NotRunningError:
PageUnknownError:
"""
@run_once
def app_check():
if not self.app_is_running():
raise NotRunningError("Game not running")
|
class UI(API):
"""
Processing interface related functions.
Main code comes from https://github.com/LmeSzinc/StarRailCopilot/blob/master/tasks/base/ui.py
and https://github.com/LmeSzinc/StarRailCopilot/blob/master/module/ui/switch.py
"""
# Make ui_current mutable so that it can be shared among subclasses of the UI class.
ui_current: dict = {'page': None}
popup_list: list = []
def ui_switch_appear(self, switch: Switch) -> bool:
"""
Args:
switch:
"""
if self.ui_get_current_page().switch != switch:
return False
for data in switch.state_list:
if self.exists(data['check_button']):
return True
return False
def ui_get_current_state(self, switch: Switch) -> str:
"""
Args:
switch:
Returns:
state name or 'unknown'.
"""
if self.ui_current['page'].switch != switch:
logger.warning(f"{self.ui_current['page']} does not have {switch}")
return 'unknown'
for data in switch.state_list:
if self.exists(data['check_button']):
return data['state']
return 'unknown'
def ui_page_appear(self, page: Page, timeout: float = 0) -> bool or tuple:
"""
Args:
page:
timeout: Seconds to find.
Returns:
If found, return tuple of (x, y), else return False.
"""
return self.exists(page.check_button, timeout)
def ui_get_current_page(self):
"""
Returns:
Page:
Raises:
NotRunningError:
PageUnknownError:
"""
@run_once
def app_check():
if not self.app_is_running():
raise NotRunningError("Game not running")
| timeout = Timer(10, count=20).start() | 9 | 2023-11-12 09:33:35+00:00 | 12k |
medkit-lib/medkit | medkit/io/srt.py | [
{
"identifier": "Attribute",
"path": "medkit/core/attribute.py",
"snippet": "class Attribute(dict_conv.SubclassMapping):\n \"\"\"\n Medkit attribute, to be added to an annotation\n\n Attributes\n ----------\n label:\n The attribute label\n value:\n The value of the attrib... | import logging
import pysrt
from pathlib import Path
from typing import List, Optional, Union
from medkit.core import (
Attribute,
InputConverter,
OperationDescription,
OutputConverter,
ProvTracer,
generate_id,
)
from medkit.core.audio import AudioDocument, FileAudioBuffer, Segment, Span | 8,081 | prov_tracer:
The provenance tracer used to trace the provenance.
"""
self._prov_tracer = prov_tracer
def load(
self,
srt_dir: Union[str, Path],
audio_dir: Optional[Union[str, Path]] = None,
audio_ext: str = ".wav",
) -> List[AudioDocument]:
"""
Load all .srt files in a directory into a list of
:class:`~medkit.core.audio.document.AudioDocument` objects.
For each .srt file, they must be a corresponding audio file with the
same basename, either in the same directory or in an separated audio
directory.
Parameters
----------
srt_dir:
Directory containing the .srt files.
audio_dir:
Directory containing the audio files corresponding to the .srt files,
if they are not in `srt_dir`.
audio_ext:
File extension to use for audio files.
Returns
-------
List[AudioDocument]
List of generated documents.
"""
srt_dir = Path(srt_dir)
audio_dir = Path(audio_dir) if audio_dir else None
docs = []
for srt_file in sorted(srt_dir.glob("*.srt")):
# corresponding audio file must have same base name with audio extension,
# either in the same directory or in audio_dir if provided
if audio_dir:
audio_file = (audio_dir / srt_file.stem).with_suffix(audio_ext)
else:
audio_file = srt_file.with_suffix(audio_ext)
doc = self.load_doc(srt_file, audio_file)
docs.append(doc)
if len(docs) == 0:
logger.warning(f"No .srt found in '{srt_dir}'")
return docs
def load_doc(self, srt_file: Union[str, Path], audio_file: Union[str, Path]) -> AudioDocument:
"""Load a single .srt file into an
:class:`~medkit.core.audio.document.AudioDocument` containing
turn segments with transcription attributes.
Parameters
----------
srt_file:
Path to the .srt file.
audio_file:
Path to the corresponding audio file.
Returns
-------
AudioDocument:
Generated document.
"""
audio_file = Path(audio_file)
srt_items = pysrt.open(str(srt_file))
full_audio = FileAudioBuffer(path=audio_file)
segments = [self._build_segment(srt_item, full_audio) for srt_item in srt_items]
doc = AudioDocument(audio=full_audio)
for segment in segments:
doc.anns.add(segment)
return doc
def load_segments(self, srt_file: Union[str, Path], audio_file: Union[str, Path]) -> List[Segment]:
"""Load a .srt file and return a list of
:class:`~medkit.core.audio.annotation.Segment` objects corresponding to
turns, with transcription attributes.
Parameters
----------
srt_file:
Path to the .srt file.
audio_file:
Path to the corresponding audio file.
Returns
-------
List[:class:`~medkit.core.audio.annotation.Segment`]:
Turn segments as found in the .srt file, with transcription
attributes attached.
"""
audio_file = Path(audio_file)
srt_items = pysrt.open(str(srt_file))
full_audio = FileAudioBuffer(path=audio_file)
segments = [self._build_segment(srt_item, full_audio) for srt_item in srt_items]
return segments
def _build_segment(self, srt_item: pysrt.SubRipItem, full_audio: FileAudioBuffer) -> Segment:
# milliseconds to seconds
start = srt_item.start.ordinal / 1000
end = srt_item.end.ordinal / 1000
audio = full_audio.trim_duration(start, end)
segment = Segment(label=self.turn_segment_label, span=Span(start, end), audio=audio)
| """
This module needs extra-dependencies not installed as core dependencies of medkit.
To install them, use `pip install medkit-lib[srt-io-convert]`.
"""
__all__ = ["SRTInputConverter", "SRTOutputConverter"]
logger = logging.getLogger(__name__)
class SRTInputConverter(InputConverter):
"""
Convert .srt files containing transcription information into turn segments
with transcription attributes.
For each turn in a .srt file, a
:class:`~medkit.core.audio.annotation.Segment` will be created, with an
associated :class:`~medkit.core.Attribute` holding the transcribed text as
value. The segments can be retrieved directly or as part of an
:class:`~medkit.core.audio.document.AudioDocument` instance.
If a :class:`~medkit.core.ProvTracer` is set, provenance information will be
added for each segment and each attribute (referencing the input converter
as the operation).
"""
def __init__(
self,
turn_segment_label: str = "turn",
transcription_attr_label: str = "transcribed_text",
converter_id: Optional[str] = None,
):
"""
Parameters
----------
turn_segment_label:
Label to use for segments representing turns in the .srt file.
transcription_attr_label:
Label to use for segments attributes containing the transcribed text.
converter_id:
Identifier of the converter.
"""
if converter_id is None:
converter_id = generate_id()
self.uid = converter_id
self.turn_segment_label = turn_segment_label
self.transcription_attr_label = transcription_attr_label
self._prov_tracer: Optional[ProvTracer] = None
@property
def description(self) -> OperationDescription:
"""Contains all the input converter init parameters."""
return OperationDescription(
uid=self.uid,
name=self.__class__.__name__,
class_name=self.__class__.__name__,
config={
"turn_segment_label": self.turn_segment_label,
"transcription_attr_label": self.transcription_attr_label,
},
)
def set_prov_tracer(self, prov_tracer: ProvTracer):
"""Enable provenance tracing.
Parameters
----------
prov_tracer:
The provenance tracer used to trace the provenance.
"""
self._prov_tracer = prov_tracer
def load(
self,
srt_dir: Union[str, Path],
audio_dir: Optional[Union[str, Path]] = None,
audio_ext: str = ".wav",
) -> List[AudioDocument]:
"""
Load all .srt files in a directory into a list of
:class:`~medkit.core.audio.document.AudioDocument` objects.
For each .srt file, they must be a corresponding audio file with the
same basename, either in the same directory or in an separated audio
directory.
Parameters
----------
srt_dir:
Directory containing the .srt files.
audio_dir:
Directory containing the audio files corresponding to the .srt files,
if they are not in `srt_dir`.
audio_ext:
File extension to use for audio files.
Returns
-------
List[AudioDocument]
List of generated documents.
"""
srt_dir = Path(srt_dir)
audio_dir = Path(audio_dir) if audio_dir else None
docs = []
for srt_file in sorted(srt_dir.glob("*.srt")):
# corresponding audio file must have same base name with audio extension,
# either in the same directory or in audio_dir if provided
if audio_dir:
audio_file = (audio_dir / srt_file.stem).with_suffix(audio_ext)
else:
audio_file = srt_file.with_suffix(audio_ext)
doc = self.load_doc(srt_file, audio_file)
docs.append(doc)
if len(docs) == 0:
logger.warning(f"No .srt found in '{srt_dir}'")
return docs
def load_doc(self, srt_file: Union[str, Path], audio_file: Union[str, Path]) -> AudioDocument:
"""Load a single .srt file into an
:class:`~medkit.core.audio.document.AudioDocument` containing
turn segments with transcription attributes.
Parameters
----------
srt_file:
Path to the .srt file.
audio_file:
Path to the corresponding audio file.
Returns
-------
AudioDocument:
Generated document.
"""
audio_file = Path(audio_file)
srt_items = pysrt.open(str(srt_file))
full_audio = FileAudioBuffer(path=audio_file)
segments = [self._build_segment(srt_item, full_audio) for srt_item in srt_items]
doc = AudioDocument(audio=full_audio)
for segment in segments:
doc.anns.add(segment)
return doc
def load_segments(self, srt_file: Union[str, Path], audio_file: Union[str, Path]) -> List[Segment]:
"""Load a .srt file and return a list of
:class:`~medkit.core.audio.annotation.Segment` objects corresponding to
turns, with transcription attributes.
Parameters
----------
srt_file:
Path to the .srt file.
audio_file:
Path to the corresponding audio file.
Returns
-------
List[:class:`~medkit.core.audio.annotation.Segment`]:
Turn segments as found in the .srt file, with transcription
attributes attached.
"""
audio_file = Path(audio_file)
srt_items = pysrt.open(str(srt_file))
full_audio = FileAudioBuffer(path=audio_file)
segments = [self._build_segment(srt_item, full_audio) for srt_item in srt_items]
return segments
def _build_segment(self, srt_item: pysrt.SubRipItem, full_audio: FileAudioBuffer) -> Segment:
# milliseconds to seconds
start = srt_item.start.ordinal / 1000
end = srt_item.end.ordinal / 1000
audio = full_audio.trim_duration(start, end)
segment = Segment(label=self.turn_segment_label, span=Span(start, end), audio=audio) | transcription_attr = Attribute(label=self.transcription_attr_label, value=srt_item.text) | 0 | 2023-11-13 16:28:56+00:00 | 12k |
interpretml/LLM-Tabular-Memorization-Checker | tabmemcheck/functions.py | [
{
"identifier": "LLM_Interface",
"path": "tabmemcheck/llm.py",
"snippet": "class LLM_Interface:\n \"\"\"The interface to the language model.\"\"\"\n\n # if true, the tests use the chat_completion function, otherwise the completion function\n chat_mode = False\n\n def completion(self, prompt,... | import os
import numpy as np
import pandas as pd
import tabmemcheck as tabmem
import tabmemcheck.analysis as analysis
import tabmemcheck.utils as utils
from typing import Any, Union
from difflib import SequenceMatcher
from tabmemcheck.llm import (
LLM_Interface,
ChatWrappedLLM,
send_chat_completion,
send_completion,
bcolors,
)
from tabmemcheck.row_independence import statistical_feature_prediction_test
from tabmemcheck.chat_completion import (
prefix_suffix_chat_completion,
row_chat_completion,
row_completion,
feature_values_chat_completion,
) | 8,039 | frac_duplicates = 1 - len(set(rows)) / len(rows)
if frac_duplicates == 0:
print(
bcolors.BOLD
+ "Info: "
+ bcolors.ENDC
+ "All the rows in the dataset are unique."
)
else:
print(
bcolors.BOLD
+ "Info: "
+ bcolors.ENDC
+ f"{100*frac_duplicates:.2f}% of the rows in this dataset are duplicates."
)
# ask the model to perform row chat completion (execute the the prompt)
if llm.chat_mode:
test_prefixes, test_suffixes, responses = row_chat_completion(
llm,
csv_file,
system_prompt,
num_prefix_rows,
num_queries,
few_shot,
out_file,
)
else:
test_prefixes, test_suffixes, responses = row_completion(
llm, csv_file, num_prefix_rows, num_queries, out_file
)
# count the number of exact matches
# NOTE here we assume that the test suffix is a single row that is unique, i.e. no duplicate rows
num_exact_matches = 0
for test_suffix, response in zip(test_suffixes, responses):
if test_suffix.strip() in response.strip():
num_exact_matches += 1
# the statistical test using the levenshtein distance TODO taken out of current version although it works
# test_prefix_rows = [prefix.split("\n") for prefix in test_prefixes]
# test_result = analysis.levenshtein_distance_t_test(
# responses, test_suffixes, test_prefix_rows
# )
# print the result
print(
bcolors.BOLD
+ "Row Completion Test: "
+ bcolors.ENDC
+ f"{num_exact_matches}/{num_queries} exact matches."
# + bcolors.BOLD
# + "\nLevenshtein distance test (p-value): "
# + bcolors.ENDC
# + f"{test_result.pvalue:.3f}."
)
return test_prefixes, test_suffixes, responses
####################################################################################
# Feature Completion
####################################################################################
def feature_completion_test(
csv_file: str,
llm: Union[LLM_Interface, str],
feature_name: str = None,
num_queries=100,
few_shot=5,
out_file=None,
system_prompt: str = "default",
):
"""Feature completion test where we attempt to predict a single rare feature & count the number of exact matches.
The basic prompt format is the following:
System: <system_prompt>
User: Feature 1 = value 1, Feature 2 = value 2, ..., Feature n = value n
Response: Feature {feature_name} = value
This can be modified in the following ways:
- Include few-shot examples from other csv files.
- Don't use the feature names, but only the values.
"""
llm = __llm_setup(llm)
# TODO statistical analysis of the uniqueness of the feature (i.e., is the test appropriate?!)
if system_prompt == "default": # default system prompt?
system_prompt = tabmem.config.system_prompts["feature-completion"]
# if no feature value is provided, automatically select the most unique feature
if feature_name is None:
feature_name, frac_unique_values = analysis.find_most_unique_feature(csv_file)
print(
bcolors.BOLD
+ "Info: "
+ bcolors.ENDC
+ f"Using feature {feature_name} with {100*frac_unique_values:.2f}% unique values."
)
# all the other features are the conditional features
feature_names = utils.get_feature_names(csv_file)
cond_feature_names = [f for f in feature_names if f != feature_name]
if not llm.chat_mode: # wrap base model to take chat queries
def build_prompt(messages):
prompt = ""
for m in messages:
if m["role"] == "user":
prompt += m["content"]
elif m["role"] == "assistant":
prompt += ", " + m["content"] + "\n\n"
prompt += ", "
return prompt
llm = ChatWrappedLLM(llm, build_prompt, ends_with="\n\n")
# execute the prompt
|
DEFAULT_FEW_SHOT_CSV_FILES = [
"iris.csv",
"adult-train.csv",
"titanic-train.csv",
"uci-wine.csv",
"california-housing.csv",
]
def __difflib_similar(csv_file_1, csv_file_2):
sm = SequenceMatcher(
None, utils.load_csv_string(csv_file_1), utils.load_csv_string(csv_file_2)
)
if sm.quick_ratio() > 0.9:
return sm.ratio() > 0.9
return False
def __validate_few_shot_files(csv_file, few_shot_csv_files):
"""check if the csv_file is contained in the few_shot_csv_files."""
dataset_name = utils.get_dataset_name(csv_file)
few_shot_names = [utils.get_dataset_name(x) for x in few_shot_csv_files]
if dataset_name in few_shot_names:
# replace the dataset_name with open-ml diabetes
few_shot_csv_files = [
x for x in few_shot_csv_files if utils.get_dataset_name(x) != dataset_name
]
few_shot_csv_files.append("openml-diabetes.csv")
# now test with difflib if the dataset contents are very similar
for fs_file in few_shot_csv_files:
if __difflib_similar(csv_file, fs_file):
print(
bcolors.BOLD
+ "Warning: "
+ bcolors.ENDC
+ f"The dataset is very similar to the few-shot dataset {utils.get_dataset_name(fs_file)}."
)
return few_shot_csv_files
def __llm_setup(llm: Union[LLM_Interface, str]):
# if llm is a string, assume open ai model
if isinstance(llm, str):
llm = tabmem.openai_setup(llm)
return llm
def __print_info(csv_file, llm, few_shot_csv_files):
"""Print some information about the csv file and the model."""
print(
bcolors.BOLD
+ "Dataset: "
+ bcolors.ENDC
+ f"{utils.get_dataset_name(csv_file)}"
)
print(bcolors.BOLD + "Model: " + bcolors.ENDC + f"{llm}")
print(
bcolors.BOLD
+ "Few-Shot: "
+ bcolors.ENDC
+ ", ".join(
[utils.get_dataset_name(fs_csv_file) for fs_csv_file in few_shot_csv_files]
)
)
####################################################################################
# All the tests
####################################################################################
def run_all_tests(
csv_file: str,
llm: Union[LLM_Interface, str],
few_shot_csv_files=DEFAULT_FEW_SHOT_CSV_FILES,
feature_name=None,
):
llm = __llm_setup(llm)
few_shot_csv_files = __validate_few_shot_files(csv_file, few_shot_csv_files)
__print_info(csv_file, llm, few_shot_csv_files)
feature_names_test(csv_file, llm, few_shot_csv_files=few_shot_csv_files)
# todo feature values
header_test(csv_file, llm, few_shot_csv_files=few_shot_csv_files)
# draw 10 zero-knowledge samples
print(
bcolors.BOLD
+ "Drawing 10 zero-knowledge samples at temperature 0.7:"
+ bcolors.ENDC
)
temp = tabmem.config.temperature
tabmem.config.temperature = 0.7
samples_df = sample(
csv_file, llm, num_queries=10, few_shot_csv_files=few_shot_csv_files
)
# print the data frame unless it is empty
if (not samples_df.empty) and len(samples_df) > 0:
pd.set_option("display.expand_frame_repr", False)
print(samples_df)
if len(samples_df) < 10:
print(f"The model provided {len(samples_df)} valid samples.")
else:
print("The model was not able to provide valid samples.")
tabmem.config.temperature = temp
row_completion_test(csv_file, llm, num_queries=25)
feature_completion_test(csv_file, llm, num_queries=25, feature_name=feature_name)
first_token_test(csv_file, llm, num_queries=25)
####################################################################################
# Feature Names
####################################################################################
def feature_names_test(
csv_file: str,
llm: Union[LLM_Interface, str],
num_prefix_features: int = None,
few_shot_csv_files=DEFAULT_FEW_SHOT_CSV_FILES,
system_prompt: str = "default",
):
"""Test if the model knows the names of the features.
The prompt format is:
System: <system_prompt>
User: Dataset: <dataset_name>
Feature 1, Feature 2, ..., Feature n
Response: Feature n+1, Feature n+2, ..., Feature m
This can be modified in the following ways:
- Include few-shot examples from other csv files.
"""
llm = __llm_setup(llm)
few_shot_csv_files = __validate_few_shot_files(csv_file, few_shot_csv_files)
# default system prompt?
if system_prompt == "default":
system_prompt = tabmem.config.system_prompts["feature-names"]
dataset_name = utils.get_dataset_name(csv_file)
feature_names = utils.get_feature_names(csv_file)
# by default, use 1/4 of the features as prefix, but at least one
if num_prefix_features is None:
num_prefix_features = max(1, len(feature_names) // 4)
# remove the current csv file from the few-shot csv files should it be present there
few_shot_csv_files = [x for x in few_shot_csv_files if not dataset_name in x]
# setup for the few-shot examples
fs_dataset_names = [utils.get_dataset_name(x) for x in few_shot_csv_files]
fs_feature_names = [
utils.get_feature_names(fs_csv_file) for fs_csv_file in few_shot_csv_files
]
fs_prefix_feature = [
utils.adjust_num_prefix_features(csv_file, num_prefix_features, fs_csv_file)
for fs_csv_file in few_shot_csv_files
]
if llm.chat_mode:
# construt the prompt
prefixes = [
f"Dataset: {dataset_name}. Feature Names: "
+ ", ".join(feature_names[:num_prefix_features])
]
suffixes = [", ".join(feature_names[num_prefix_features:])]
few_shot = []
for fs_dataset_name, fs_feature_name, fs_prefix_feature in zip(
fs_dataset_names, fs_feature_names, fs_prefix_feature
):
few_shot.append(
(
[
f"Dataset: {fs_dataset_name}. Feature Names: "
+ ", ".join(fs_feature_name[:fs_prefix_feature])
],
[", ".join(fs_feature_name[fs_prefix_feature:])],
)
)
# execute the the prompt
_, _, responses = prefix_suffix_chat_completion(
llm,
prefixes,
suffixes,
system_prompt,
few_shot=few_shot,
num_queries=1,
)
response = responses[0]
else:
# construct the prompt
prompt = ""
for fs_dataset_name, fs_feature_name, fs_prefix_feature in zip(
fs_dataset_names, fs_feature_names, fs_prefix_feature
):
prompt += (
f"Dataset: {fs_dataset_name}.\nNumber of Features: {len(fs_feature_name)}\nFeature Names: "
+ ", ".join(fs_feature_name)
+ "\n\n"
)
prompt += (
f"Dataset: {dataset_name}\nNumber of Features: {len(feature_names)}\nFeature Names: "
+ ", ".join(feature_names[:num_prefix_features])
+ ", "
)
# execute the prompt
response = send_completion(llm, prompt)
# consider the response only until the first '\n\n'
idx = response.find("\n\n")
if idx != -1:
response = response[:idx]
print(
bcolors.BOLD
+ "Feature Names Test\nFeature Names: "
+ bcolors.ENDC
+ ", ".join(feature_names[num_prefix_features:])
+ bcolors.BOLD
+ "\nModel Generation: "
+ bcolors.ENDC
+ response
)
# TODO do some sort of evaluation
# for example, return true if it completes all but X of the feature names, correcting for upper/lower case
# at least do formatted printing of the results
####################################################################################
# Feature Values
####################################################################################
####################################################################################
# Header Test
####################################################################################
def header_test(
csv_file: str,
llm: Union[LLM_Interface, str],
split_rows: list[int] = [2, 4, 6, 8],
completion_length: int = 500,
few_shot_csv_files: list[str] = DEFAULT_FEW_SHOT_CSV_FILES,
system_prompt: str = "default",
):
"""Header test, using other csv files as few-shot examples.
Splits the csv file at random positions in rows 2, 4, 6, and 8. Performs 1 query for each split. Reports the best completion.
NOTE: This test might fail if the header and rows of the csv file are very long, and the model has a small context window.
NOTE: in the end, this is the case for all of our tests :)
"""
llm = __llm_setup(llm)
few_shot_csv_files = __validate_few_shot_files(csv_file, few_shot_csv_files)
# default system prompt?
if system_prompt == "default":
system_prompt = tabmem.config.system_prompts["header"]
# load the csv file as a single contiguous string. also load the rows to determine offsets within the string
data = utils.load_csv_string(csv_file, header=True)
csv_rows = utils.load_csv_rows(csv_file, header=True)
# load the few-shot examples
few_shot_data = []
for fs_csv_file in few_shot_csv_files:
fs_data = utils.load_csv_string(fs_csv_file, header=True)
few_shot_data.append(fs_data)
# perform the test multiple times, cutting the dataset at random positions in rows split_rows
num_completions = -1
header, completion = None, None
for i_row in split_rows:
offset = np.sum([len(row) for row in csv_rows[: i_row - 1]])
offset += np.random.randint(
len(csv_rows[i_row]) // 3, 2 * len(csv_rows[i_row]) // 3
)
prefixes = [data[:offset]]
suffixes = [data[offset : offset + completion_length]]
few_shot = [
([fs_data[:offset]], [fs_data[offset : offset + completion_length]])
for fs_data in few_shot_data
]
# chat mode: use few-shot examples
if llm.chat_mode:
_, _, response = prefix_suffix_chat_completion(
llm, prefixes, suffixes, system_prompt, few_shot=few_shot, num_queries=1
)
response = response[0]
else: # otherwise, plain completion
response = send_completion(llm, prefixes[0])
# find the first digit where the response and the completion disagree
idx = -1000
for idx, (c, r) in enumerate(zip(data[offset:], response)):
if c != r:
break
if idx == len(response) - 1 and response[idx] == data[offset + idx]:
idx += 1 # no disagreement found, set idx to length of the response
# is this the best completion so far?
if idx > num_completions:
num_completions = idx
header = prefixes[0]
completion = response
# for the printing, we first color all green up to the first disagreement
completion_print = bcolors.Green + completion[:num_completions]
# then color red up to the beginning of the next row, if any
remaining_completion = completion[num_completions:]
idx = remaining_completion.find("\n")
if idx == -1:
completion_print += bcolors.Red + remaining_completion
else:
completion_print += bcolors.Red + remaining_completion[:idx] + "\n"
remaining_completion = remaining_completion[idx + 1 :]
# for all additional rows, green up to the first disagreement, all red after that
completion_rows = remaining_completion.split("\n")
# the corresponding next row in the csv file
data_idx = data[len(header) + num_completions :].find("\n")
data_rows = data[len(header) + num_completions + data_idx + 1 :].split("\n")
for completion_row, data_row in zip(completion_rows, data_rows):
if completion_row == data_row:
completion_print += bcolors.Green + completion_row + "\n"
continue
# not equal, find the first disagreement
idx = -1000
for idx, (c, r) in enumerate(zip(data_row, completion_row)):
if c != r:
break
if idx == len(completion_row) - 1 and completion_row[idx] == data_row[idx]:
idx += 1
# print first part green, second part red
completion_print += (
bcolors.Green
+ completion_row[:idx]
+ bcolors.Red
+ completion_row[idx:]
+ "\n"
)
# remove final new line
completion_print = completion_print.rstrip("\n")
# print the result
print(
bcolors.BOLD
+ "Header Test: "
+ bcolors.ENDC
+ bcolors.Black
+ header
+ completion_print
+ bcolors.ENDC
+ bcolors.BOLD
+ "\nHeader Test Legend: "
+ bcolors.ENDC
+ "Prompt "
+ bcolors.Green
+ "Correct "
+ bcolors.Red
+ "Incorrect"
+ bcolors.ENDC
)
# TODO return true if it completes the given row, as well as the next row.
# TODO count the number of correctly completed rows and print this number
####################################################################################
# Row Completion
####################################################################################
def row_completion_test(
csv_file: str,
llm: Union[LLM_Interface, str],
num_prefix_rows=10,
num_queries=50,
few_shot=7,
out_file=None,
system_prompt: str = "default",
):
"""Row completion test: Complete the next row of the csv file, given the previous rows."""
llm = __llm_setup(llm)
if system_prompt == "default": # default system prompt?
system_prompt = tabmem.config.system_prompts["row-completion"]
# what fraction of the rows are duplicates?
rows = utils.load_csv_rows(csv_file)
frac_duplicates = 1 - len(set(rows)) / len(rows)
if frac_duplicates == 0:
print(
bcolors.BOLD
+ "Info: "
+ bcolors.ENDC
+ "All the rows in the dataset are unique."
)
else:
print(
bcolors.BOLD
+ "Info: "
+ bcolors.ENDC
+ f"{100*frac_duplicates:.2f}% of the rows in this dataset are duplicates."
)
# ask the model to perform row chat completion (execute the the prompt)
if llm.chat_mode:
test_prefixes, test_suffixes, responses = row_chat_completion(
llm,
csv_file,
system_prompt,
num_prefix_rows,
num_queries,
few_shot,
out_file,
)
else:
test_prefixes, test_suffixes, responses = row_completion(
llm, csv_file, num_prefix_rows, num_queries, out_file
)
# count the number of exact matches
# NOTE here we assume that the test suffix is a single row that is unique, i.e. no duplicate rows
num_exact_matches = 0
for test_suffix, response in zip(test_suffixes, responses):
if test_suffix.strip() in response.strip():
num_exact_matches += 1
# the statistical test using the levenshtein distance TODO taken out of current version although it works
# test_prefix_rows = [prefix.split("\n") for prefix in test_prefixes]
# test_result = analysis.levenshtein_distance_t_test(
# responses, test_suffixes, test_prefix_rows
# )
# print the result
print(
bcolors.BOLD
+ "Row Completion Test: "
+ bcolors.ENDC
+ f"{num_exact_matches}/{num_queries} exact matches."
# + bcolors.BOLD
# + "\nLevenshtein distance test (p-value): "
# + bcolors.ENDC
# + f"{test_result.pvalue:.3f}."
)
return test_prefixes, test_suffixes, responses
####################################################################################
# Feature Completion
####################################################################################
def feature_completion_test(
csv_file: str,
llm: Union[LLM_Interface, str],
feature_name: str = None,
num_queries=100,
few_shot=5,
out_file=None,
system_prompt: str = "default",
):
"""Feature completion test where we attempt to predict a single rare feature & count the number of exact matches.
The basic prompt format is the following:
System: <system_prompt>
User: Feature 1 = value 1, Feature 2 = value 2, ..., Feature n = value n
Response: Feature {feature_name} = value
This can be modified in the following ways:
- Include few-shot examples from other csv files.
- Don't use the feature names, but only the values.
"""
llm = __llm_setup(llm)
# TODO statistical analysis of the uniqueness of the feature (i.e., is the test appropriate?!)
if system_prompt == "default": # default system prompt?
system_prompt = tabmem.config.system_prompts["feature-completion"]
# if no feature value is provided, automatically select the most unique feature
if feature_name is None:
feature_name, frac_unique_values = analysis.find_most_unique_feature(csv_file)
print(
bcolors.BOLD
+ "Info: "
+ bcolors.ENDC
+ f"Using feature {feature_name} with {100*frac_unique_values:.2f}% unique values."
)
# all the other features are the conditional features
feature_names = utils.get_feature_names(csv_file)
cond_feature_names = [f for f in feature_names if f != feature_name]
if not llm.chat_mode: # wrap base model to take chat queries
def build_prompt(messages):
prompt = ""
for m in messages:
if m["role"] == "user":
prompt += m["content"]
elif m["role"] == "assistant":
prompt += ", " + m["content"] + "\n\n"
prompt += ", "
return prompt
llm = ChatWrappedLLM(llm, build_prompt, ends_with="\n\n")
# execute the prompt | _, test_suffixes, responses = feature_values_chat_completion( | 9 | 2023-11-14 18:34:51+00:00 | 12k |
WindowsSov8forUs/bestdori_api | bestdori/songs.py | [
{
"identifier": "Chart",
"path": "bestdori/charts.py",
"snippet": "class Chart(list[NoteType]):\n '''谱面类,统合针对谱面的一层操作\n\n 参数:\n chart (list[dict[str, Any]]): 原始谱面代码'''\n # 初始化\n def __init__(self, chart: list[dict[str, Any]]) -> None:\n '''谱面类,统合针对谱面的一层操作\n\n 参数:\n ... | from typing import Optional, Literal, Any
from requests.exceptions import HTTPError
from .charts import Chart
from .post import get_list
from .utils.utils import ASSETS, API
from .utils.network import Assets, Api
from .exceptions import (
DiffNotExistError,
SongNotExistError
) | 8,202 | proxy: Optional[str]=None) -> None:
'''歌曲封面类'''
self._index: int = index
'''数据包序列号'''
self._jacket_image: str = jacket_image
'''封面文件名'''
self._server: Literal['jp', 'en', 'tw', 'cn', 'kr'] = server
'''封面所在服务器'''
self._proxy: Optional[str] = proxy
'''代理服务器'''
return
# 获取封面 url
@property
def url(self) -> str:
'''获取封面 url'''
return Assets(
ASSETS['songs']['musicjacket'].format(
index=self._index, jacket_image=self._jacket_image
), self._server, self._proxy
).get_url()
# 获取封面字节数据
@property
def bytes(self) -> bytes:
'''获取封面字节数据'''
return Assets(
ASSETS['songs']['musicjacket'].format(
index=self._index, jacket_image=self._jacket_image
), self._server, self._proxy
).get()
# 歌曲类
class Song:
'''歌曲类
参数:
id_ (int): 歌曲 ID
proxy (Optional[str], optional): 代理服务器
'''
# 初始化
def __init__(self, id_: int, proxy: Optional[str]=None) -> None:
'''歌曲类
参数:
id_ (int): 歌曲 ID
proxy (Optional[str], optional): 代理服务器
'''
self.id: int = id_
'''歌曲 ID'''
self._info: dict[str, Any] = {}
'''歌曲信息'''
self.proxy: Optional[str] = proxy
'''代理服务器'''
# 检测 ID 是否存在
all_id = get_all(0, proxy=proxy)
if not str(id_) in all_id.keys():
raise SongNotExistError(id_)
return
# 获取歌曲信息
def get_info(self) -> dict[str, Any]:
'''获取歌曲信息
返回:
dict[str, Any]: 歌曲详细信息
'''
if len(self._info) <= 0:
# 如果没有歌曲信息存储
response = Api(
API['songs']['info'].format(id=self.id), proxy=self.proxy
).request('get')
self._info = dict(response.json())
return self._info
# 获取歌曲所在服务器
@property
def server(self) -> Literal['jp', 'en', 'tw', 'cn', 'kr']:
'''获取歌曲所在服务器
返回:
Literal['jp', 'en', 'tw', 'cn', 'kr']: 歌曲所在服务器
'''
info = self.get_info()
# 获取 publishedAt 数据
if (published_at := info.get('publishedAt', None)) is None:
raise Exception('无法获取歌曲发布时间。')
# 根据 publishedAt 数据判断服务器
if published_at[0] is not None: return 'jp'
elif published_at[1] is not None: return 'en'
elif published_at[2] is not None: return 'tw'
elif published_at[3] is not None: return 'cn'
elif published_at[4] is not None: return 'kr'
else:
raise Exception('无法获取歌曲服务器。')
# 获取歌曲名称
@property
def name(self) -> str:
'''获取歌曲名称
返回:
str: 歌曲名称
'''
info = self.get_info()
# 获取 musicTitle 数据
if (music_title := info.get('musicTitle', None)) is None:
raise Exception('无法获取歌曲名称。')
# 获取第一个非 None 歌曲名称
try:
return next(filter(lambda x: x is not None, music_title))
except StopIteration:
raise Exception('无法获取歌曲名称。')
# 获取歌曲谱面
def get_chart(
self,
diff: Literal['easy', 'normal', 'hard', 'expert', 'special']='expert'
| '''`bestdori.songs`
BanG Dream! 歌曲相关操作'''
# 获取总歌曲信息
def get_all(index: Literal[0, 5]=5, proxy: Optional[str]=None) -> dict[str, dict[str, Any]]:
'''获取总歌曲信息
参数:
index (Literal[0, 5], optional): 指定获取哪种 `all.json`
`0`: 仅获取所有已有歌曲 ID `all.0.json`
`5`: 获取所有已有歌曲的简洁信息 `all.5.json`,默认为该项
proxy (Optional[str], optional): 代理服务器
返回:
dict[str, dict[str, Any]]: 获取到的总歌曲信息
'''
return Api(API['songs']['all'].format(index=index), proxy=proxy).request('get').json()
# 歌曲封面内部类
class Jacket:
'''歌曲封面类
参数:
url (str): 封面链接
bytes (bytes): 封面字节数据
'''
# 初始化
def __init__(
self,
index: int,
jacket_image: str,
server: Literal['jp', 'en', 'tw', 'cn', 'kr'],
proxy: Optional[str]=None) -> None:
'''歌曲封面类'''
self._index: int = index
'''数据包序列号'''
self._jacket_image: str = jacket_image
'''封面文件名'''
self._server: Literal['jp', 'en', 'tw', 'cn', 'kr'] = server
'''封面所在服务器'''
self._proxy: Optional[str] = proxy
'''代理服务器'''
return
# 获取封面 url
@property
def url(self) -> str:
'''获取封面 url'''
return Assets(
ASSETS['songs']['musicjacket'].format(
index=self._index, jacket_image=self._jacket_image
), self._server, self._proxy
).get_url()
# 获取封面字节数据
@property
def bytes(self) -> bytes:
'''获取封面字节数据'''
return Assets(
ASSETS['songs']['musicjacket'].format(
index=self._index, jacket_image=self._jacket_image
), self._server, self._proxy
).get()
# 歌曲类
class Song:
'''歌曲类
参数:
id_ (int): 歌曲 ID
proxy (Optional[str], optional): 代理服务器
'''
# 初始化
def __init__(self, id_: int, proxy: Optional[str]=None) -> None:
'''歌曲类
参数:
id_ (int): 歌曲 ID
proxy (Optional[str], optional): 代理服务器
'''
self.id: int = id_
'''歌曲 ID'''
self._info: dict[str, Any] = {}
'''歌曲信息'''
self.proxy: Optional[str] = proxy
'''代理服务器'''
# 检测 ID 是否存在
all_id = get_all(0, proxy=proxy)
if not str(id_) in all_id.keys():
raise SongNotExistError(id_)
return
# 获取歌曲信息
def get_info(self) -> dict[str, Any]:
'''获取歌曲信息
返回:
dict[str, Any]: 歌曲详细信息
'''
if len(self._info) <= 0:
# 如果没有歌曲信息存储
response = Api(
API['songs']['info'].format(id=self.id), proxy=self.proxy
).request('get')
self._info = dict(response.json())
return self._info
# 获取歌曲所在服务器
@property
def server(self) -> Literal['jp', 'en', 'tw', 'cn', 'kr']:
'''获取歌曲所在服务器
返回:
Literal['jp', 'en', 'tw', 'cn', 'kr']: 歌曲所在服务器
'''
info = self.get_info()
# 获取 publishedAt 数据
if (published_at := info.get('publishedAt', None)) is None:
raise Exception('无法获取歌曲发布时间。')
# 根据 publishedAt 数据判断服务器
if published_at[0] is not None: return 'jp'
elif published_at[1] is not None: return 'en'
elif published_at[2] is not None: return 'tw'
elif published_at[3] is not None: return 'cn'
elif published_at[4] is not None: return 'kr'
else:
raise Exception('无法获取歌曲服务器。')
# 获取歌曲名称
@property
def name(self) -> str:
'''获取歌曲名称
返回:
str: 歌曲名称
'''
info = self.get_info()
# 获取 musicTitle 数据
if (music_title := info.get('musicTitle', None)) is None:
raise Exception('无法获取歌曲名称。')
# 获取第一个非 None 歌曲名称
try:
return next(filter(lambda x: x is not None, music_title))
except StopIteration:
raise Exception('无法获取歌曲名称。')
# 获取歌曲谱面
def get_chart(
self,
diff: Literal['easy', 'normal', 'hard', 'expert', 'special']='expert' | ) -> Chart: | 0 | 2023-11-16 13:09:20+00:00 | 12k |
kampta/asic | commons/logger.py | [
{
"identifier": "images2grid",
"path": "commons/utils.py",
"snippet": "def images2grid(images, **grid_kwargs):\n # images should be (N, C, H, W)\n grid = make_grid(images, **grid_kwargs)\n out = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()\n return o... | from torch.utils.tensorboard.writer import SummaryWriter
from PIL import Image
from commons.utils import images2grid, map_minmax, compute_pck, sample_tuples, \
pck_loop
from commons.draw import splat_points, load_fg_points, \
concat_v, get_colors, get_dense_colors, load_text_points
from thirdparty.colormap.colormap_flow import color_wheel_fast_smooth
import torch
import torch.nn.functional as F
import wandb
import numpy as np | 7,606 | flow, _ = stn(all_imgs[src_idx])
colors = F.grid_sample(colors, flow, padding_mode='border',
align_corners=True)
colors = map_minmax(colors, 0, 1, -1, 1)
alpha = 0.5
blend_img = alpha * all_imgs[src_idx] * (1-all_masks[src_idx]) + \
(all_imgs[src_idx] * alpha + colors * (1-alpha)) * all_masks[src_idx]
blend_img = torch.cat([wheel, blend_img, wheel, colors* all_masks[src_idx]])
writer.log_image_grid(blend_img, 'canon_map', train_idx, len(blend_img),
log_mean_img=False, nrow=len(blend_img)//2)
# Log keypoints from Image space to canonical space
if has_gt_kp:
canon_corrs = stn.transfer_forward(all_flows, all_kps[..., :2], res, is_flow=True)
canon_corrs = stn.unnormalize(canon_corrs, res, res)
canon_vis = all_kps[..., -1]
num_kp = canon_vis.size(-1)
N = canon_vis.size(0)
colors = kps_cols.permute(1, 0, 2).expand(-1, N, -1).to(device)
heatmaps = splat_points(
torch.ones(num_kp, 3, res, res, device=device) * -1,
canon_corrs.permute(1, 0, 2), sigma=6., opacity=1.,
colors=colors, alpha_channel=canon_vis.permute(1, 0).unsqueeze(-1))
writer.log_image_grid(heatmaps, 'kp_heatmaps', train_idx,
num_kp, padding=2, pad_value=1.)
# Log parts from Image space to canonical space
# Splat one part at a time to canonical
# TODO: splat all at once
num_parts = dset.num_parts
part_kp_canons = []
part_kp_vis = []
for part in range(num_parts):
part_masks = (parts == part).float().unsqueeze(1)
kp, kp_vis, _ = load_fg_points(part_masks, resolution=vis_denseres)
kp_canon = stn.transfer_forward(all_flows, kp[..., :2], res, is_flow=True)
kp_canon = stn.unnormalize(kp_canon, res, res)
part_kp_canons.append(kp_canon.reshape(-1, 2))
part_kp_vis.append(kp_vis.reshape(-1))
part_kp_canons = torch.stack(part_kp_canons)
part_kp_vis = torch.stack(part_kp_vis)
colors = parts_cols[:-1].unsqueeze(1).expand(-1, part_kp_vis.size(1), -1)
heatmaps = splat_points(
torch.ones(num_parts, 3, res, res, device=device) * -1,
part_kp_canons, sigma=2., opacity=1.,
colors=colors, alpha_channel=part_kp_vis.unsqueeze(-1))
writer.log_image_grid(heatmaps, 'part_heatmaps', train_idx,
num_parts, padding=2, pad_value=1.)
# Compute PCKs
N = all_imgs.size(0)
transfer_fn = stn.transfer_points
pck_pairs = None
if has_gt_kp:
# First compute PCK for all 2-pairs
if has_fixed_pairs:
tuples = dset.fixed_pairs
if dset.thresholds is not None:
thresholds = [torch.from_numpy(dset.thresholds)[tuples[:, 1]]]
else:
thresholds = None
else:
tuples = sample_tuples(N)
thresholds = None
print(f"First computing 2-point PCK for {len(tuples)} pairs")
gt_corrs, pred_corrs, vis = pck_loop(
tuples, all_kps, transfer_fn, all_flows, all_masks, res,
return_canon=False, is_flow=True)
pck_pairs = compute_pck(pred_corrs, gt_corrs, vis, thresholds,
img_size=res)
# Compute k-cycle PCK
pck_cycles = []
if not has_gt_kp:
kp, kp_vis, kp_col_dense = load_fg_points(all_masks,
resolution=vis_denseres)
ignore_idx = kp_vis.sum(dim=0) == 0
all_kps = torch.cat([kp[:, ~ignore_idx], kp_vis[:, ~ignore_idx].unsqueeze(-1)], dim=2)
ignore_interim = True
else:
ignore_interim = False
for k in [2, 3, 4]:
tuples = sample_tuples(N, k=k, count=200)
if has_fixed_pairs and dset.thresholds is not None:
thresholds = torch.from_numpy(dset.thresholds[tuples[:, 1:]])
thresholds = thresholds.reshape(-1)
else:
thresholds = None
print(f"Next computing {k}-cycle PCK for {len(tuples)} tuples")
gt_corrs, pred_corrs, vis = pck_loop(
tuples, all_kps, transfer_fn, all_flows, all_masks, res,
return_canon=False, is_flow=True, ignore_interim=ignore_interim)
pck = compute_pck(pred_corrs, gt_corrs, vis, thresholds, img_size=res)
pck_cycles.append(pck)
return pck_pairs, pck_cycles
class Logger(SummaryWriter):
def __init__(self, results_path, log_to_tb=False, log_to_wandb=True):
super().__init__(results_path)
self.results_path = results_path
self.log_to_tb = log_to_tb
self.log_to_wandb = log_to_wandb
def _log_image_grid(self, images, logging_name, prefix, itr, range=(-1, 1),
scale_each=False, nrow=None, **kwargs):
nrow = max(1, int(len(images) ** 0.5+0.5)) if nrow is None else nrow
if type(images[0]) is torch.Tensor:
ndarr = images2grid(images, return_as_PIL=True, nrow=nrow,
normalize=True, value_range=range,
scale_each=scale_each, **kwargs)
grid = Image.fromarray(ndarr)
grid.save(f"{self.results_path}/{logging_name}_{str(itr).zfill(7)}.png")
if self.log_to_wandb:
wandb.log({logging_name: wandb.Image(grid)}, step=itr)
else:
|
@torch.inference_mode()
def log_visuals(canon, stn, dset, train_idx, writer, vis_sample=2,
vis_denseres=32):
device = 'cuda' if torch.cuda.is_available() else 'cpu'
pseudo_kps = dset.pseudo_kps
parts = dset.parts
vis_sample = min(vis_sample, len(dset))
res = dset.img_size
has_gt_kp = dset.kps is not None
has_fixed_pairs = dset.fixed_pairs is not None # SPair
# Run full test dataloader (assuming small dataset)
all_imgs = dset.imgs
all_masks = dset.masks
all_kps = dset.kps
all_flows, _ = stn(all_imgs)
if has_gt_kp:
kps_cols = torch.from_numpy(get_colors(all_kps.size(1))).float()
kps_cols = map_minmax(kps_cols, 0, 1, -1, 1).to(device).unsqueeze(0)
parts_cols = torch.from_numpy(get_colors(dset.num_parts+1)).float()
parts_cols = map_minmax(parts_cols, 0, 1, -1, 1).to(device)
parts_cols[-1] = 0
# Text logging
text_kp, text_kp_col = load_text_points('CVPR')
text_kp = text_kp.to(device).unsqueeze(0)
text_kp_col = text_kp_col.to(device).unsqueeze(0)
pairs = sample_tuples(len(dset), count=vis_sample, seed=0)
src_idx, trg_idx = pairs[:, 0], pairs[:, 1]
# Log only once during the training
if train_idx == 0:
# Log images and the mask
writer.log_image_grid(all_imgs[:vis_sample], 'img', train_idx,
vis_sample, nrow=vis_sample)
writer.log_image_grid(all_imgs[:vis_sample]*all_masks[:vis_sample],
'img_mask', train_idx, vis_sample, nrow=vis_sample)
# Log neural best buddies (sparse)
kp1 = pseudo_kps[src_idx, trg_idx]
kp2 = pseudo_kps[trg_idx, src_idx]
kp_vis = kp1[..., -1] * kp2[..., -1]
kp1, kp2 = kp1[..., :2], kp2[..., :2]
colors = map_minmax(get_dense_colors(kp1), 0, 1, -1, 1)
blend_src = splat_points(
all_imgs[src_idx], kp1, sigma=3., opacity=1.0, colors=colors,
alpha_channel=kp_vis.unsqueeze(-1))
blend_trg = splat_points(
all_imgs[trg_idx], kp2, sigma=3., opacity=1.0, colors=colors,
alpha_channel=kp_vis.unsqueeze(-1))
stacked = torch.stack([blend_src, blend_trg], dim=1).flatten(0, 1)
writer.log_image_grid(stacked, 'kp_pseudo_gt', train_idx, 2*vis_sample,
log_mean_img=False, nrow=2)
# Log parts
parts_img = parts_cols[parts[:vis_sample]].permute(0, 3, 1, 2)
writer.log_image_grid(parts_img, 'parts', train_idx, vis_sample,
nrow=vis_sample, log_mean_img=False)
# Log groundtruth kp
if has_gt_kp:
kp1, kp2 = all_kps[src_idx], all_kps[trg_idx]
kp_vis = kp1[..., -1] * kp2[..., -1]
kp1, kp2 = kp1[..., :2], kp2[..., :2]
colors = kps_cols.expand(vis_sample, -1, -1)
blend_src = splat_points(
all_imgs[src_idx], kp1, sigma=3., opacity=1.0, colors=colors,
alpha_channel=kp_vis.unsqueeze(-1))
blend_trg = splat_points(
all_imgs[trg_idx], kp2, sigma=3., opacity=1.0, colors=colors,
alpha_channel=kp_vis.unsqueeze(-1))
stacked = torch.stack([blend_src, blend_trg], dim=1).flatten(0, 1)
stacked = torch.stack([blend_src, blend_trg], dim=1).flatten(0, 1)
writer.log_image_grid(stacked, 'kp_gt', train_idx, 2*vis_sample,
log_mean_img=False, nrow=2)
# Log kp and top predictions by STN (if kp are available)
if has_gt_kp:
kp1 = all_kps[src_idx][..., :2]
kp_vis = all_kps[src_idx][..., 2]
kp_pred = stn.transfer_points(
kp1, src_idx, trg_idx, all_flows, mask=all_masks, res=res, is_flow=True)
colors = kps_cols.expand(vis_sample, -1, -1)
blend_src = splat_points(
all_imgs[src_idx], kp1, sigma=3., opacity=1.0,
colors=colors, alpha_channel=kp_vis.unsqueeze(-1))
blend_trg = splat_points(
all_imgs[trg_idx], kp_pred.float(), sigma=3., opacity=1.0,
colors=colors, alpha_channel=kp_vis.unsqueeze(-1))
stacked = torch.stack([blend_src, blend_trg], dim=1).flatten(0, 1)
writer.log_image_grid(stacked, 'kp_pred_sparse', train_idx,
2*vis_sample, log_mean_img=False, nrow=2)
# Log current canon image
canon_grid = canon.get_grid(vis_sample)
if canon_grid.size(1) > 3:
canon_grid = canon_grid[:, :3]
scale_factor = res / canon_grid.size(-1)
canon_grid = F.interpolate(
canon_grid, scale_factor=scale_factor, mode='bilinear')
writer.log_image_grid(canon_grid, 'canon', train_idx, 1, log_mean_img=False)
# Log dense correspondences
kp, kp_vis, kp_col_dense = load_fg_points(all_masks[src_idx],
resolution=vis_denseres)
kp_pred, kp_canon = stn.transfer_points(
kp, src_idx, trg_idx, all_flows, mask=all_masks, res=res,
return_canon=True, is_flow=True)
colors = map_minmax(kp_col_dense, 0, 1, -1, 1)
blend_src = splat_points(
all_imgs[src_idx], kp, sigma=4., opacity=0.75,
colors=colors, alpha_channel=kp_vis.unsqueeze(-1))
blend_trg = splat_points(
all_imgs[trg_idx], kp_pred.float(), sigma=4., opacity=0.75,
colors=colors, alpha_channel=kp_vis.unsqueeze(-1))
blend_canon = splat_points(
torch.ones_like(canon_grid) * -1, kp_canon, sigma=1.3, opacity=1.0,
colors=colors, alpha_channel=kp_vis.unsqueeze(-1))
stacked = torch.stack([blend_src, blend_canon, blend_trg], dim=1).\
flatten(0, 1)
writer.log_image_grid(
stacked, 'kp_pred_dense', train_idx, 3*vis_sample,
log_mean_img=False, nrow=3)
# # Log dense correspondences with text
# text_kp = text_kp.expand(vis_sample, -1, -1)
# text_kp_col = text_kp_col.expand(vis_sample, -1, -1)
# kp_pred, kp_canon = stn.transfer_points(
# text_kp, src_idx, trg_idx, all_flows, mask=all_masks, res=res,
# return_canon=True, is_flow=True)
# blend_src = splat_points(all_imgs[src_idx], text_kp, sigma=0.7, opacity=1.,
# colors=text_kp_col)
# blend_trg = splat_points(all_imgs[trg_idx], kp_pred.float(), sigma=0.7,
# opacity=1., colors=text_kp_col)
# blend_canon = splat_points(torch.ones_like(canon_grid) * -1, kp_canon,
# sigma=0.7, opacity=1., colors=text_kp_col)
# stacked = torch.stack([blend_src, blend_canon, blend_trg], dim=1).\
# flatten(0, 1)
# writer.log_image_grid(
# stacked, 'kp_pred_text', train_idx, 3*vis_sample,
# log_mean_img=False, nrow=3)
# Log dense mapping from canonical space to Image space
wheel = color_wheel_fast_smooth(res).permute(2, 0, 1).unsqueeze(0).to(device)
colors = wheel.expand(vis_sample, -1, -1, -1)
flow, _ = stn(all_imgs[src_idx])
colors = F.grid_sample(colors, flow, padding_mode='border',
align_corners=True)
colors = map_minmax(colors, 0, 1, -1, 1)
alpha = 0.5
blend_img = alpha * all_imgs[src_idx] * (1-all_masks[src_idx]) + \
(all_imgs[src_idx] * alpha + colors * (1-alpha)) * all_masks[src_idx]
blend_img = torch.cat([wheel, blend_img, wheel, colors* all_masks[src_idx]])
writer.log_image_grid(blend_img, 'canon_map', train_idx, len(blend_img),
log_mean_img=False, nrow=len(blend_img)//2)
# Log keypoints from Image space to canonical space
if has_gt_kp:
canon_corrs = stn.transfer_forward(all_flows, all_kps[..., :2], res, is_flow=True)
canon_corrs = stn.unnormalize(canon_corrs, res, res)
canon_vis = all_kps[..., -1]
num_kp = canon_vis.size(-1)
N = canon_vis.size(0)
colors = kps_cols.permute(1, 0, 2).expand(-1, N, -1).to(device)
heatmaps = splat_points(
torch.ones(num_kp, 3, res, res, device=device) * -1,
canon_corrs.permute(1, 0, 2), sigma=6., opacity=1.,
colors=colors, alpha_channel=canon_vis.permute(1, 0).unsqueeze(-1))
writer.log_image_grid(heatmaps, 'kp_heatmaps', train_idx,
num_kp, padding=2, pad_value=1.)
# Log parts from Image space to canonical space
# Splat one part at a time to canonical
# TODO: splat all at once
num_parts = dset.num_parts
part_kp_canons = []
part_kp_vis = []
for part in range(num_parts):
part_masks = (parts == part).float().unsqueeze(1)
kp, kp_vis, _ = load_fg_points(part_masks, resolution=vis_denseres)
kp_canon = stn.transfer_forward(all_flows, kp[..., :2], res, is_flow=True)
kp_canon = stn.unnormalize(kp_canon, res, res)
part_kp_canons.append(kp_canon.reshape(-1, 2))
part_kp_vis.append(kp_vis.reshape(-1))
part_kp_canons = torch.stack(part_kp_canons)
part_kp_vis = torch.stack(part_kp_vis)
colors = parts_cols[:-1].unsqueeze(1).expand(-1, part_kp_vis.size(1), -1)
heatmaps = splat_points(
torch.ones(num_parts, 3, res, res, device=device) * -1,
part_kp_canons, sigma=2., opacity=1.,
colors=colors, alpha_channel=part_kp_vis.unsqueeze(-1))
writer.log_image_grid(heatmaps, 'part_heatmaps', train_idx,
num_parts, padding=2, pad_value=1.)
# Compute PCKs
N = all_imgs.size(0)
transfer_fn = stn.transfer_points
pck_pairs = None
if has_gt_kp:
# First compute PCK for all 2-pairs
if has_fixed_pairs:
tuples = dset.fixed_pairs
if dset.thresholds is not None:
thresholds = [torch.from_numpy(dset.thresholds)[tuples[:, 1]]]
else:
thresholds = None
else:
tuples = sample_tuples(N)
thresholds = None
print(f"First computing 2-point PCK for {len(tuples)} pairs")
gt_corrs, pred_corrs, vis = pck_loop(
tuples, all_kps, transfer_fn, all_flows, all_masks, res,
return_canon=False, is_flow=True)
pck_pairs = compute_pck(pred_corrs, gt_corrs, vis, thresholds,
img_size=res)
# Compute k-cycle PCK
pck_cycles = []
if not has_gt_kp:
kp, kp_vis, kp_col_dense = load_fg_points(all_masks,
resolution=vis_denseres)
ignore_idx = kp_vis.sum(dim=0) == 0
all_kps = torch.cat([kp[:, ~ignore_idx], kp_vis[:, ~ignore_idx].unsqueeze(-1)], dim=2)
ignore_interim = True
else:
ignore_interim = False
for k in [2, 3, 4]:
tuples = sample_tuples(N, k=k, count=200)
if has_fixed_pairs and dset.thresholds is not None:
thresholds = torch.from_numpy(dset.thresholds[tuples[:, 1:]])
thresholds = thresholds.reshape(-1)
else:
thresholds = None
print(f"Next computing {k}-cycle PCK for {len(tuples)} tuples")
gt_corrs, pred_corrs, vis = pck_loop(
tuples, all_kps, transfer_fn, all_flows, all_masks, res,
return_canon=False, is_flow=True, ignore_interim=ignore_interim)
pck = compute_pck(pred_corrs, gt_corrs, vis, thresholds, img_size=res)
pck_cycles.append(pck)
return pck_pairs, pck_cycles
class Logger(SummaryWriter):
def __init__(self, results_path, log_to_tb=False, log_to_wandb=True):
super().__init__(results_path)
self.results_path = results_path
self.log_to_tb = log_to_tb
self.log_to_wandb = log_to_wandb
def _log_image_grid(self, images, logging_name, prefix, itr, range=(-1, 1),
scale_each=False, nrow=None, **kwargs):
nrow = max(1, int(len(images) ** 0.5+0.5)) if nrow is None else nrow
if type(images[0]) is torch.Tensor:
ndarr = images2grid(images, return_as_PIL=True, nrow=nrow,
normalize=True, value_range=range,
scale_each=scale_each, **kwargs)
grid = Image.fromarray(ndarr)
grid.save(f"{self.results_path}/{logging_name}_{str(itr).zfill(7)}.png")
if self.log_to_wandb:
wandb.log({logging_name: wandb.Image(grid)}, step=itr)
else: | grid = concat_v(*images) | 7 | 2023-11-14 16:43:16+00:00 | 12k |
AnonymGiant/ViLaM | lavis/runners/runner_iter.py | [
{
"identifier": "download_cached_file",
"path": "lavis/common/dist_utils.py",
"snippet": "def download_cached_file(url, check_hash=True, progress=False):\n \"\"\"\n Download a file from a URL and cache it locally. If the file already exists, it is not downloaded again.\n If distributed, only th... | import datetime
import logging
import os
import time
import torch
import torch.distributed as dist
import webdataset as wds
from lavis.common.dist_utils import download_cached_file, is_main_process, main_process
from lavis.common.registry import registry
from lavis.common.utils import is_url
from lavis.datasets.data_utils import concat_datasets, reorg_datasets_by_split
from lavis.runners.runner_base import RunnerBase
from torch.utils.data.dataset import ChainDataset | 8,067 | assert (
"agg_metrics" in val_log
), "No agg_metrics found in validation log."
agg_metrics = val_log["agg_metrics"]
if agg_metrics > best_agg_metric and split_name == "val":
best_iters, best_agg_metric = end_iters, agg_metrics
self._save_checkpoint(end_iters, is_best=True)
val_log.update({"best_iters": best_iters})
self.log_stats(val_log, split_name)
else:
# if no validation split is provided, we just save the checkpoint at the end of each inner epoch.
if not self.evaluate_only:
self._save_checkpoint(end_iters, is_best=False)
if self.evaluate_only:
break
dist.barrier()
# testing phase
self.evaluate(cur_epoch=self.cur_epoch)
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
logging.info("Training time {}".format(total_time_str))
def train_iters(self, epoch, start_iters):
# train by iterations
self.model.train()
return self.task.train_iters(
epoch=epoch,
start_iters=start_iters,
iters_per_inner_epoch=self.iters_per_inner_epoch,
model=self.model,
data_loader=self.train_loader,
optimizer=self.optimizer,
scaler=self.scaler,
lr_scheduler=self.lr_scheduler,
cuda_enabled=self.cuda_enabled,
log_freq=self.log_freq,
accum_grad_iters=self.accum_grad_iters,
)
@main_process
def _save_checkpoint(self, cur_iters, is_best=False):
save_obj = {
"model": self.unwrap_dist_model(self.model).state_dict(),
"optimizer": self.optimizer.state_dict(),
"config": self.config.to_dict(),
"scaler": self.scaler.state_dict() if self.scaler else None,
"iters": cur_iters,
}
save_to = os.path.join(
self.output_dir,
"checkpoint_{}.pth".format("best" if is_best else cur_iters),
)
logging.info("Saving checkpoint at iters {} to {}.".format(cur_iters, save_to))
torch.save(save_obj, save_to)
def _load_checkpoint(self, url_or_filename):
"""
Resume from a checkpoint.
"""
if is_url(url_or_filename):
cached_file = download_cached_file(
url_or_filename, check_hash=False, progress=True
)
checkpoint = torch.load(cached_file, map_location=self.device)
elif os.path.isfile(url_or_filename):
checkpoint = torch.load(url_or_filename, map_location=self.device)
else:
raise RuntimeError("checkpoint url or path is invalid")
state_dict = checkpoint["model"]
self.unwrap_dist_model(self.model).load_state_dict(state_dict)
self.optimizer.load_state_dict(checkpoint["optimizer"])
if self.scaler and "scaler" in checkpoint:
self.scaler.load_state_dict(checkpoint["scaler"])
self.start_iters = checkpoint["iters"] + 1
logging.info("Resume checkpoint from {}".format(url_or_filename))
@property
def dataloaders(self) -> dict:
"""
A property to get and create dataloaders by split just in need.
If no train_dataset_ratio is provided, concatenate map-style datasets and
chain wds.DataPipe datasets separately. Training set becomes a tuple
(ConcatDataset, ChainDataset), both are optional but at least one of them is
required. The resultant ConcatDataset and ChainDataset will be sampled evenly.
If train_dataset_ratio is provided, create a MultiIterLoader to sample
each dataset by ratios during training.
Currently do not support multiple datasets for validation and test.
Returns:
dict: {split_name: (tuples of) dataloader}
"""
if self._dataloaders is None:
# reoganize datasets by split and concatenate/chain if necessary
dataset_ratios = self.config.run_cfg.get("train_dataset_ratios", None)
if dataset_ratios is None:
# concatenate map-style datasets and chain wds.DataPipe datasets separately
# training set becomes a tuple (ConcatDataset, ChainDataset), both are
# optional but at least one of them is required. The resultant ConcatDataset
# and ChainDataset will be sampled evenly.
logging.info(
"dataset_ratios not specified, datasets will be concatenated (map-style datasets) or chained (webdataset.DataPipeline)."
)
datasets = reorg_datasets_by_split(self.datasets)
| """
Copyright (c) 2022, salesforce.com, inc.
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
"""
@registry.register_runner("runner_iter")
class RunnerIter(RunnerBase):
"""
Run training based on the number of iterations. This is common when
the training dataset size is large. Underhood logic is similar to
epoch-based training by considering every #iters_per_inner_epoch as an
inner epoch.
In iter-based runner, after every #iters_per_inner_epoch steps, we
1) do a validation epoch;
2) schedule the learning rate;
3) save the checkpoint.
We refer every #iters_per_inner_epoch steps as an inner epoch.
"""
def __init__(self, cfg, task, model, datasets, job_id):
super().__init__(cfg, task, model, datasets, job_id)
self.start_iters = 0
self.max_iters = int(self.config.run_cfg.get("max_iters", -1))
assert self.max_iters > 0, "max_iters must be greater than 0."
self.iters_per_inner_epoch = int(
self.config.run_cfg.get("iters_per_inner_epoch", -1)
)
assert (
self.iters_per_inner_epoch > 0
), "iters_per_inner_epoch must be greater than 0."
@property
def max_epoch(self):
return int(self.max_iters / self.iters_per_inner_epoch)
@property
def cur_epoch(self):
try:
return self.train_loader.epoch
except AttributeError:
# pipeline data (e.g. LAION) is streaming, have no concept of epoch
return 0
def _progress(self, cur_iters):
return "{}_iters={}".format(self.cur_epoch, cur_iters)
def train(self):
start_time = time.time()
best_agg_metric = 0
best_iters = 0
self.log_config()
# resume from checkpoint if specified
if not self.evaluate_only and self.resume_ckpt_path is not None:
self._load_checkpoint(self.resume_ckpt_path)
for start_iters in range(
self.start_iters, self.max_iters, self.iters_per_inner_epoch
):
end_iters = start_iters + self.iters_per_inner_epoch
# training phase
if not self.evaluate_only:
logging.info(
"Start training, max_iters={}, in total {} inner epochs.".format(
self.max_iters, int(self.max_iters / self.iters_per_inner_epoch)
)
)
train_stats = self.train_iters(self.cur_epoch, start_iters)
self.log_stats(split_name="train", stats=train_stats)
self._save_checkpoint(end_iters, is_best=False)
# evaluation phase
if len(self.valid_splits) > 0:
for split_name in self.valid_splits:
logging.info("Evaluating on {}.".format(split_name))
val_log = self.eval_epoch(
split_name=split_name, cur_epoch=self._progress(end_iters)
)
if val_log is not None:
if is_main_process():
assert (
"agg_metrics" in val_log
), "No agg_metrics found in validation log."
agg_metrics = val_log["agg_metrics"]
if agg_metrics > best_agg_metric and split_name == "val":
best_iters, best_agg_metric = end_iters, agg_metrics
self._save_checkpoint(end_iters, is_best=True)
val_log.update({"best_iters": best_iters})
self.log_stats(val_log, split_name)
else:
# if no validation split is provided, we just save the checkpoint at the end of each inner epoch.
if not self.evaluate_only:
self._save_checkpoint(end_iters, is_best=False)
if self.evaluate_only:
break
dist.barrier()
# testing phase
self.evaluate(cur_epoch=self.cur_epoch)
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
logging.info("Training time {}".format(total_time_str))
def train_iters(self, epoch, start_iters):
# train by iterations
self.model.train()
return self.task.train_iters(
epoch=epoch,
start_iters=start_iters,
iters_per_inner_epoch=self.iters_per_inner_epoch,
model=self.model,
data_loader=self.train_loader,
optimizer=self.optimizer,
scaler=self.scaler,
lr_scheduler=self.lr_scheduler,
cuda_enabled=self.cuda_enabled,
log_freq=self.log_freq,
accum_grad_iters=self.accum_grad_iters,
)
@main_process
def _save_checkpoint(self, cur_iters, is_best=False):
save_obj = {
"model": self.unwrap_dist_model(self.model).state_dict(),
"optimizer": self.optimizer.state_dict(),
"config": self.config.to_dict(),
"scaler": self.scaler.state_dict() if self.scaler else None,
"iters": cur_iters,
}
save_to = os.path.join(
self.output_dir,
"checkpoint_{}.pth".format("best" if is_best else cur_iters),
)
logging.info("Saving checkpoint at iters {} to {}.".format(cur_iters, save_to))
torch.save(save_obj, save_to)
def _load_checkpoint(self, url_or_filename):
"""
Resume from a checkpoint.
"""
if is_url(url_or_filename):
cached_file = download_cached_file(
url_or_filename, check_hash=False, progress=True
)
checkpoint = torch.load(cached_file, map_location=self.device)
elif os.path.isfile(url_or_filename):
checkpoint = torch.load(url_or_filename, map_location=self.device)
else:
raise RuntimeError("checkpoint url or path is invalid")
state_dict = checkpoint["model"]
self.unwrap_dist_model(self.model).load_state_dict(state_dict)
self.optimizer.load_state_dict(checkpoint["optimizer"])
if self.scaler and "scaler" in checkpoint:
self.scaler.load_state_dict(checkpoint["scaler"])
self.start_iters = checkpoint["iters"] + 1
logging.info("Resume checkpoint from {}".format(url_or_filename))
@property
def dataloaders(self) -> dict:
"""
A property to get and create dataloaders by split just in need.
If no train_dataset_ratio is provided, concatenate map-style datasets and
chain wds.DataPipe datasets separately. Training set becomes a tuple
(ConcatDataset, ChainDataset), both are optional but at least one of them is
required. The resultant ConcatDataset and ChainDataset will be sampled evenly.
If train_dataset_ratio is provided, create a MultiIterLoader to sample
each dataset by ratios during training.
Currently do not support multiple datasets for validation and test.
Returns:
dict: {split_name: (tuples of) dataloader}
"""
if self._dataloaders is None:
# reoganize datasets by split and concatenate/chain if necessary
dataset_ratios = self.config.run_cfg.get("train_dataset_ratios", None)
if dataset_ratios is None:
# concatenate map-style datasets and chain wds.DataPipe datasets separately
# training set becomes a tuple (ConcatDataset, ChainDataset), both are
# optional but at least one of them is required. The resultant ConcatDataset
# and ChainDataset will be sampled evenly.
logging.info(
"dataset_ratios not specified, datasets will be concatenated (map-style datasets) or chained (webdataset.DataPipeline)."
)
datasets = reorg_datasets_by_split(self.datasets) | self.datasets = concat_datasets(datasets) | 5 | 2023-11-14 08:57:59+00:00 | 12k |
ml4bio/USPNet | Net/New_ComModel.py | [
{
"identifier": "MultiHeadAttention",
"path": "Net/SelfAttentionTorch.py",
"snippet": "class MultiHeadAttention(nn.Module):\n\n def __init__(self,\n config\n ):\n \"\"\"Multi-head attention.\n :param in_features: Size of each input sample.\n :param... | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from Net.LSTM import *
from Net.CNN import *
from Net.SelfAttentionTorch import MultiHeadAttention
from Net.transformer import TransformerEncoder
from torch.autograd import Variable
from torch.nn import Parameter
from Net.CRF import CRF
from Net.LSTM_Attention import LSTM_attention | 10,222 |
embedding_feature_dim_msa = 768
embedding_feature_dim_pro = 1024
class NormedLinear(nn.Module):
def __init__(self, in_features, out_features):
super(NormedLinear, self).__init__()
self.weight = Parameter(torch.Tensor(in_features, out_features))
self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)
def forward(self, x):
out = F.normalize(x, dim=1).mm(F.normalize(self.weight, dim=0))
return out
class Attention_CRF(nn.Module):
def __init__(self, config, config1, cnn_configs, lstm_lan_config, lstm_config,
use_CRF=False, use_attention=True, reweight_ratio=None):
super(Attention_CRF, self).__init__()
self.num_classes = 20
self.max_len = config1['max_text_len']
self.embedding = nn.Embedding(num_embeddings=config['vocab_size'], embedding_dim=config['embedding_size'])
self.ef1 = 512
self.ef2 = 144
self.ef3 = 32
self.csef = 11
self.ef4 = 256
self.ef5 = 256
self.ef6 = 64
if (use_CRF):
|
embedding_feature_dim_msa = 768
embedding_feature_dim_pro = 1024
class NormedLinear(nn.Module):
def __init__(self, in_features, out_features):
super(NormedLinear, self).__init__()
self.weight = Parameter(torch.Tensor(in_features, out_features))
self.weight.data.uniform_(-1, 1).renorm_(2, 1, 1e-5).mul_(1e5)
def forward(self, x):
out = F.normalize(x, dim=1).mm(F.normalize(self.weight, dim=0))
return out
class Attention_CRF(nn.Module):
def __init__(self, config, config1, cnn_configs, lstm_lan_config, lstm_config,
use_CRF=False, use_attention=True, reweight_ratio=None):
super(Attention_CRF, self).__init__()
self.num_classes = 20
self.max_len = config1['max_text_len']
self.embedding = nn.Embedding(num_embeddings=config['vocab_size'], embedding_dim=config['embedding_size'])
self.ef1 = 512
self.ef2 = 144
self.ef3 = 32
self.csef = 11
self.ef4 = 256
self.ef5 = 256
self.ef6 = 64
if (use_CRF): | self.crf = CRF(num_tags=11, reweight_ratio=reweight_ratio)#original: num_tags=9 | 2 | 2023-11-14 08:19:42+00:00 | 12k |
doodledood/chat-flock | examples/manual_hierarchical_participant.py | [
{
"identifier": "InMemoryChatDataBackingStore",
"path": "chatflock/backing_stores/in_memory.py",
"snippet": "class InMemoryChatDataBackingStore(ChatDataBackingStore):\n messages: List[ChatMessage]\n participants: Dict[str, ChatParticipant]\n last_message_id: Optional[int] = None\n\n def __in... | import typer
from dotenv import load_dotenv
from halo import Halo
from chatflock.backing_stores import InMemoryChatDataBackingStore
from chatflock.base import Chat
from chatflock.conductors import LangChainBasedAIChatConductor, RoundRobinChatConductor
from chatflock.participants.group import GroupBasedChatParticipant
from chatflock.participants.langchain import LangChainBasedAIChatParticipant
from chatflock.participants.user import UserChatParticipant
from chatflock.renderers import TerminalChatRenderer
from examples.common import create_chat_model | 8,468 |
def manual_hierarchical_participant(model: str = "gpt-4-1106-preview", temperature: float = 0.0) -> None:
chat_model = create_chat_model(model=model, temperature=temperature)
spinner = Halo(spinner="dots")
comedy_team = GroupBasedChatParticipant(
group_name="Comedy Team",
mission="Collaborate on funny humour-filled responses based on the original request for the user",
chat=Chat(
backing_store=InMemoryChatDataBackingStore(),
renderer=TerminalChatRenderer(),
initial_participants=[
|
def manual_hierarchical_participant(model: str = "gpt-4-1106-preview", temperature: float = 0.0) -> None:
chat_model = create_chat_model(model=model, temperature=temperature)
spinner = Halo(spinner="dots")
comedy_team = GroupBasedChatParticipant(
group_name="Comedy Team",
mission="Collaborate on funny humour-filled responses based on the original request for the user",
chat=Chat(
backing_store=InMemoryChatDataBackingStore(),
renderer=TerminalChatRenderer(),
initial_participants=[ | LangChainBasedAIChatParticipant( | 5 | 2023-11-12 11:10:58+00:00 | 12k |
CryptoFuzzPy/cryptofuzz | cryptofuzz/Wallet.py | [
{
"identifier": "Convertor",
"path": "cryptofuzz/utils.py",
"snippet": "class Convertor:\n def __init__(self):\n super().__init__()\n self.gen = Generator()\n \n def double_sha256(self, data):\n return hashlib.sha256(hashlib.sha256(data).digest()).digest()\n \n def mn... | import os
from . import Generator, Convertor
from . import (
Bitcoin, BitcoinGold, Dash, DigiByte, Dogecoin, Ethereum, Litecoin, Qtum, Ravencoin, Tron, Zcash, Axe
) | 9,565 | >>> p2wpkh_p2sh = PrivateKey_To_Bitcoin_Addr(Privatekey, 'p2wpkh_p2sh')
>>> p2wsh_p2sh = PrivateKey_To_Bitcoin_Addr(Privatekey, 'p2wsh_p2sh')
--------------------------------------------------------
"""
BTC = Bitcoin()
if Type == 'p2pkh':
return BTC.hex_addr(privatekey, 'p2pkh')
elif Type == 'p2sh':
return BTC.hex_addr(privatekey, 'p2sh')
elif Type == 'p2wpkh':
return BTC.hex_addr(privatekey, 'p2wpkh')
elif Type == 'p2wsh':
return BTC.hex_addr(privatekey, 'p2wsh')
elif Type == 'p2wpkh_p2sh':
return BTC.hex_addr(privatekey, 'p2wpkh_p2sh')
elif Type == 'p2wsh_p2sh':
return BTC.hex_addr(privatekey, 'p2wsh_p2sh')
else:
return BTC.hex_addr(privatekey, 'p2pkh')
# ----------------------------------------------------------
def PrivateKey_To_Ethereum_Addr(privatekey: str) -> str:
"""
Convert Private Key To Ethereum Address.
:param privatekey:
:type privatekey: str
:returns:
--------------------------------------------------------
>>> from cryptofuzz.Wallet import PrivateKey_To_Ethereum_Addr
>>> Privatekey = "e3bfc1c...ca52b8" # example key
>>> addr = PrivateKey_To_Ethereum_Addr(Privatekey)
--------------------------------------------------------
"""
ETH = Ethereum()
return ETH.hex_addr(privatekey)
# ----------------------------------------------------------
def PrivateKey_To_BitcoinGold_Addr(privatekey: str) -> str:
"""
Convert Private Key To Bitcoin Gold Address.
:param privatekey:
:type privatekey: str
:returns:
--------------------------------------------------------
>>> from cryptofuzz.Wallet import PrivateKey_To_BitcoinGold_Addr
>>> Privatekey = "e3bfc1c...ca52b8" # example key
>>> addr = PrivateKey_To_BitcoinGold_Addr(Privatekey)
--------------------------------------------------------
"""
BTG = BitcoinGold()
return BTG.hex_addr(privatekey)
# ----------------------------------------------------------
def PrivateKey_To_Dash_Addr(privatekey: str) -> str:
"""
Convert Private Key To Dash Address.
:param privatekey:
:type privatekey: str
:returns:
--------------------------------------------------------
>>> from cryptofuzz.Wallet import PrivateKey_To_Dash_Addr
>>> Privatekey = "e3bfc1c...ca52b8" # example key
>>> addr = PrivateKey_To_Dash_Addr(Privatekey)
--------------------------------------------------------
"""
DASH = Dash()
return DASH.hex_addr(privatekey)
# ----------------------------------------------------------
def PrivateKey_To_DigiByte_Addr(privatekey: str) -> str:
"""
Convert Private Key To Digibyte Address.
:param privatekey:
:type privatekey: str
:returns:
--------------------------------------------------------
>>> from cryptofuzz.Wallet import PrivateKey_To_Digibyte_Addr
>>> Privatekey = "e3bfc1c...ca52b8" # example key
>>> addr = PrivateKey_To_DigiByte_Addr(Privatekey)
--------------------------------------------------------
"""
| # programmer and owner mmdrza.com
# ----------------------------------------------------------
# Module-wide singletons used by every function below:
# ``convertor`` performs format conversions (hex/bytes/WIF/mnemonic/xprv/...),
# ``generator`` produces fresh random key material.
convertor = Convertor()
generator = Generator()
# ----------------------------------------------------------
def getPrivateKey() -> str:
    """Return a freshly generated, non-repeating private key as a hex string.

    >>> key = getPrivateKey()
    """
    return generator.generate_private_key()
# ----------------------------------------------------------
def getMnemonic(size: int = 12) -> str:
    """Return a random standard BIP39 mnemonic phrase.

    :param size: number of words in the phrase (default 12).

    >>> words = getMnemonic()
    """
    return generator.generate_mnemonic(size=size)
# ----------------------------------------------------------
def getBinary() -> str:
    """Return a random 256-bit binary string.

    >>> bits = getBinary()
    """
    return generator.generate_binary()
# ----------------------------------------------------------
def getRootKey() -> str:
    """Return a randomly generated root key (xprv).

    >>> root = getRootKey()
    """
    return generator.generate_xprv()
# -------------------------------------------------------------------
def getBytes() -> bytes:
    """Return 32 cryptographically secure random bytes."""
    return os.urandom(32)
# -------------------------------------------------------------------
def getDecimal() -> int:
    """Return a randomly generated decimal (integer) key value."""
    return generator.generate_decimal()
# -------------------------------------------------------------------
def PrivateKey_To_Addr(hexed: str, compress: bool = False) -> str:
    """Derive a Bitcoin address from a hex private key.

    :param hexed: private key as a hex string.
    :param compress: True for the compressed form, False (default) otherwise.

    >>> addr_c = PrivateKey_To_Addr("0A97965...A45517", True)
    >>> addr_u = PrivateKey_To_Addr("0A97965...A45517", False)
    """
    return convertor.bytes_to_addr(convertor.hex_to_bytes(hexed),
                                   True if compress else False)
# ----------------------------------------------------------
def PrivateKey_To_Wif(hexed: str, compress: bool = False) -> str:
    """Derive a WIF key (compressed or uncompressed) from a hex private key.

    >>> wif = PrivateKey_To_Wif("0A97965...A45517", True)
    """
    return convertor.bytes_to_wif(convertor.hex_to_bytes(hexed),
                                  True if compress else False)
# ----------------------------------------------------------
def PrivateKey_To_PublicKey(hexed: str, compress: bool = False) -> str:
    """Derive a hex public key (compressed or uncompressed) from a hex key.

    >>> pub = PrivateKey_To_PublicKey("0A97965...A45517", True)
    """
    public_bytes = convertor.bytes_to_public(convertor.hex_to_bytes(hexed),
                                             True if compress else False)
    return public_bytes.hex()
# ----------------------------------------------------------
def PrivateKey_To_Mnemonic(hexed: str) -> str:
    """Convert a hex private key to a mnemonic phrase.

    >>> words = PrivateKey_To_Mnemonic("0A97965...A45517")
    """
    return convertor.bytes_to_mne(convertor.hex_to_bytes(hexed))
# ----------------------------------------------------------
def PrivateKey_To_Byte(hexed: str) -> bytes:
    """Convert a hex private key to its raw bytes.

    >>> raw = PrivateKey_To_Byte("0A97965...A45517")
    """
    return convertor.hex_to_bytes(hexed)
# ----------------------------------------------------------
def PrivateKey_To_Binary(hexed: str) -> str:
    """Convert a hex private key to a binary string.

    >>> bits = PrivateKey_To_Binary("0A97965...A45517")
    """
    return convertor.bytes_to_binary(convertor.hex_to_bytes(hexed))
# ----------------------------------------------------------
def PrivateKey_To_Decimal(hexed: str) -> int:
    """Convert a hex private key to its integer value.

    >>> dec = PrivateKey_To_Decimal("0A97965...A45517")
    """
    return convertor.bytes_to_int(convertor.hex_to_bytes(hexed))
# ----------------------------------------------------------
def PrivateKey_To_XPRV(hexed: str) -> str:
    """Convert a hex private key to an extended private key (xprv).

    >>> xprv = PrivateKey_To_XPRV("0A97965...A45517")
    """
    return convertor.bytes_to_xprv(convertor.hex_to_bytes(hexed))
# ----------------------------------------------------------
def PrivateKey_To_CompressAddr(hexed: str) -> str:
    """Convert a hex private key to a compressed Bitcoin address.

    >>> addr = PrivateKey_To_CompressAddr("0A97965...A45517")
    """
    return convertor.bytes_to_addr(convertor.hex_to_bytes(hexed), True)
# ----------------------------------------------------------
def PrivateKey_To_UncompressAddr(hexed: str) -> str:
    """Convert a hex private key to an uncompressed Bitcoin address.

    >>> addr = PrivateKey_To_UncompressAddr("0A97965...A45517")
    """
    return convertor.bytes_to_addr(convertor.hex_to_bytes(hexed), False)
# ----------------------------------------------------------
def PrivateKey_To_XPUB(hexed: str) -> str:
    """Convert a hex private key to an extended public key (xpub).

    >>> xpub = PrivateKey_To_XPUB("0A97965...A45517")
    """
    return convertor.bytes_to_xpub(convertor.hex_to_bytes(hexed))
# ----------------------------------------------------------
def Bytes_To_PrivateKey(byte: bytes) -> str:
    """Convert raw seed bytes to a hex private key."""
    return convertor.bytes_to_hex(byte)
# ----------------------------------------------------------
def Bytes_To_Address(seed: bytes, compress: bool = False):
    """Convert seed bytes to a Bitcoin address (compressed or uncompressed)."""
    return convertor.bytes_to_addr(seedBytes=seed,
                                   compress=True if compress else False)
# ----------------------------------------------------------
def Bytes_To_Mnemonic(seed: bytes) -> str:
    """Convert seed bytes to a mnemonic phrase."""
    return convertor.bytes_to_mne(seed)
# ----------------------------------------------------------
def Bytes_To_XPRV(seed: bytes) -> str:
    """Convert seed bytes to an extended private key (xprv)."""
    return convertor.bytes_to_xprv(seed)
# ----------------------------------------------------------
def Bytes_To_Binary(seed: bytes):
    """Convert seed bytes to a binary string."""
    return convertor.bytes_to_binary(seed)
# ----------------------------------------------------------
def Bytes_To_PublicKey(seed: bytes, compress: bool = False):
    """Convert seed bytes to a hex public key (compressed or uncompressed)."""
    return convertor.bytes_to_public(seed, True if compress else False).hex()
# ----------------------------------------------------------
def Bytes_To_Compress_Addr(seed: bytes) -> str:
    """Convert seed bytes to a compressed Bitcoin address."""
    return convertor.bytes_to_addr(seed, True)
# ----------------------------------------------------------
def Bytes_To_Uncompress_Addr(seed: bytes) -> str:
    """Convert seed bytes to an uncompressed Bitcoin address."""
    return convertor.bytes_to_addr(seed, False)
# ----------------------------------------------------------
def Bytes_To_Decimal(seed: bytes):
    """Convert seed bytes to their integer value."""
    return convertor.bytes_to_int(seed)
# ----------------------------------------------------------
def Bytes_To_XPUB(seed: bytes) -> str:
    """Convert seed bytes to an extended public key (xpub)."""
    return convertor.bytes_to_xpub(seed)
# ----------------------------------------------------------
def Bytes_To_Wif(seed: bytes, compress: bool = False) -> str:
    """Convert seed bytes to a WIF key (compressed or uncompressed)."""
    return convertor.bytes_to_wif(seed, True if compress else False)
# ----------------------------------------------------------
def Mnemonic_To_Bytes(mnemonic: str) -> bytes:
    """Convert a mnemonic phrase to its seed bytes."""
    return convertor.mne_to_seed(mnemonic=mnemonic)
# ----------------------------------------------------------
def Mnemonic_To_PrivateKey(mnemonic: str) -> str:
    """Convert a mnemonic phrase to a hex private key."""
    return convertor.bytes_to_hex(seed=convertor.mne_to_seed(mnemonic=mnemonic))
# ----------------------------------------------------------
def Mnemonic_To_PublicKey(mnemonic: str, compress: bool = False):
    # NOTE(review): despite the name, this returns the *address* derived from
    # the public key (via pub_to_addr), not the public-key hex itself.
    # Callers may depend on the current behavior -- confirm before fixing.
    seed = convertor.mne_to_seed(mnemonic=mnemonic)
    if compress:
        pub = convertor.bytes_to_public(seed, True).hex()
        return convertor.pub_to_addr(pub)
    else:
        pub = convertor.bytes_to_public(seed, False).hex()
        return convertor.pub_to_addr(pub)
# ----------------------------------------------------------
def Mnemonic_To_Decimal(mnemonic: str):
    """Convert a mnemonic phrase to its integer value."""
    return convertor.bytes_to_int(convertor.mne_to_seed(mnemonic=mnemonic))
# ----------------------------------------------------------
def Mnemonic_To_Binary(mnemonic: str):
    """Convert a mnemonic phrase to a binary string."""
    return convertor.bytes_to_binary(convertor.mne_to_seed(mnemonic=mnemonic))
# ----------------------------------------------------------
def Mnemonic_To_XPRV(mnemonic: str):
    """Convert a mnemonic phrase to an extended private key (xprv)."""
    return convertor.bytes_to_xprv(convertor.mne_to_seed(mnemonic))
# ----------------------------------------------------------
def Mnemonic_To_Addr(mnemonic: str, compress: bool = False) -> str:
    """Derive a Bitcoin address (compressed or uncompressed) from a mnemonic."""
    return convertor.bytes_to_addr(convertor.mne_to_seed(mnemonic),
                                   True if compress else False)
# ----------------------------------------------------------
def Mnemonic_To_XPUB(mnemonic: str):
    """Convert a mnemonic phrase to an extended public key (xpub)."""
    return convertor.bytes_to_xpub(convertor.mne_to_seed(mnemonic))
# ----------------------------------------------------------
def Mnemonic_To_Wif(mnemonic: str, compress: bool = False) -> str:
    """Derive a WIF key (compressed or uncompressed) from a mnemonic."""
    return convertor.bytes_to_wif(convertor.mne_to_seed(mnemonic),
                                  True if compress else False)
# ----------------------------------------------------------
def Passphrase_To_Addr(passphrase: str, compress: bool = False) -> str:
    """Derive a Bitcoin address (compressed or uncompressed) from a passphrase."""
    return convertor.pass_to_addr(passphrase, True if compress else False)
# ----------------------------------------------------------
def Passphrase_To_Bytes(passphrase: str) -> bytes:
    """Convert a passphrase to seed bytes."""
    return convertor.pass_to_bytes(passphrase)
# ----------------------------------------------------------
def Passphrase_To_PrivateKey(passphrase: str) -> str:
    """Convert a passphrase to a hex private key."""
    return convertor.bytes_to_hex(convertor.pass_to_bytes(passphrase))
# ----------------------------------------------------------
def Passphrase_To_PublicKey(passphrase: str, compress: bool = False) -> str:
    """Derive a hex public key (compressed or uncompressed) from a passphrase."""
    return convertor.bytes_to_public(convertor.pass_to_bytes(passphrase),
                                     True if compress else False).hex()
# ----------------------------------------------------------
def Passphrase_To_Wif(passphrase: str, compress: bool = False) -> str:
    """Derive a WIF key (compressed or uncompressed) from a passphrase."""
    return convertor.bytes_to_wif(convertor.pass_to_bytes(passphrase),
                                  True if compress else False)
# ----------------------------------------------------------
def Passphrase_To_RootKey(passphrase: str) -> str:
    """Convert a passphrase to a root key (xprv)."""
    return convertor.bytes_to_xprv(convertor.pass_to_bytes(passphrase))
# ----------------------------------------------------------
def Passphrase_To_XPUB(passphrase: str) -> str:
    """Convert a passphrase to an extended public key (xpub)."""
    return convertor.bytes_to_xpub(convertor.pass_to_bytes(passphrase))
# ----------------------------------------------------------
def Passphrase_To_Decimal(passphrase: str) -> int:
    """Convert a passphrase to its integer value."""
    return convertor.bytes_to_int(convertor.pass_to_bytes(passphrase))
# ----------------------------------------------------------
def Wif_To_Bytes(wif: str) -> bytes:
    """Decode a WIF key to its raw bytes."""
    return convertor.wif_to_bytes(wif)
# ----------------------------------------------------------
def Wif_To_Addr(wif: str, compress: bool = False) -> str:
    """Derive a Bitcoin address (compressed or uncompressed) from a WIF key."""
    return convertor.wif_to_addr(wif, compress)
# ----------------------------------------------------------
def Wif_To_PrivateKey(wif: str) -> str:
    """Convert a WIF key to a hex private key."""
    return convertor.bytes_to_hex(convertor.wif_to_bytes(wif))
# ----------------------------------------------------------
def Wif_To_Mnemonic(wif: str) -> str:
    """Convert a WIF key to a mnemonic phrase."""
    return convertor.bytes_to_mne(convertor.wif_to_bytes(wif))
# ----------------------------------------------------------
def Wif_To_Decimal(wif: str) -> int:
    """Convert a WIF key to its integer value."""
    return convertor.bytes_to_int(convertor.wif_to_bytes(wif))
# ----------------------------------------------------------
def Wif_To_Binary(wif: str) -> str:
    """Convert a WIF key to a binary string."""
    return convertor.bytes_to_binary(convertor.wif_to_bytes(wif))
# ----------------------------------------------------------
def Wif_To_XPRV(wif: str) -> str:
    """Convert a WIF key to an extended private key (xprv)."""
    return convertor.bytes_to_xprv(convertor.wif_to_bytes(wif))
# ----------------------------------------------------------
def Wif_To_XPUB(wif: str) -> str:
    """Convert a WIF key to an extended public key (xpub)."""
    return convertor.bytes_to_xpub(convertor.wif_to_bytes(wif))
# ----------------------------------------------------------
def Wif_To_RootKey(wif: str) -> str:
    """Convert a WIF key to a root key (alias of Wif_To_XPRV)."""
    return Wif_To_XPRV(wif)
# ----------------------------------------------------------
def Wif_To_PublicKey(wif: str, compress: bool = False):
    """Derive a hex public key (compressed or uncompressed) from a WIF key."""
    return convertor.bytes_to_public(convertor.wif_to_bytes(wif),
                                     True if compress else False).hex()
# ----------------------------------------------------------
def Decimal_To_PrivateKey(dec: int) -> str:
    """Format an integer as a 64-character, zero-padded hex private key."""
    return format(dec, "064x")
# ----------------------------------------------------------
def Decimal_To_Bytes(dec: int) -> bytes:
    """Convert an integer to seed bytes."""
    return convertor.int_to_bytes(dec)
# ----------------------------------------------------------
def Decimal_To_PublicKey(dec: int, compress: bool = False) -> str:
    """Derive a hex public key (compressed or uncompressed) from an integer."""
    return convertor.bytes_to_public(Decimal_To_Bytes(dec),
                                     True if compress else False).hex()
# ----------------------------------------------------------
def Decimal_To_Address(dec: int, compress: bool = False) -> str:
    """Derive a Bitcoin address (compressed or uncompressed) from an integer."""
    return convertor.bytes_to_addr(Decimal_To_Bytes(dec),
                                   True if compress else False)
# ----------------------------------------------------------
def Decimal_To_Mnemonic(dec: int) -> str:
    """Convert an integer to a mnemonic phrase."""
    return convertor.bytes_to_mne(convertor.int_to_bytes(dec))
# ----------------------------------------------------------
def Decimal_To_XPRV(dec: int) -> str:
    """Convert an integer to an extended private key (xprv)."""
    return convertor.bytes_to_xprv(convertor.int_to_bytes(dec))
# ----------------------------------------------------------
def Decimal_To_XPUB(dec: int) -> str:
    """Convert an integer to an extended public key (xpub)."""
    return convertor.bytes_to_xpub(convertor.int_to_bytes(dec))
# ----------------------------------------------------------
def Decimal_To_Binary(dec: int) -> str:
    """Convert an integer to a binary string."""
    return convertor.bytes_to_binary(convertor.int_to_bytes(dec))
# ----------------------------------------------------------
def Decimal_To_Wif(dec: int, compress: bool = False) -> str:
    """Derive a WIF key (compressed or uncompressed) from an integer."""
    return convertor.bytes_to_wif(convertor.int_to_bytes(dec),
                                  True if compress else False)
# ----------------------------------------------------------
def Binary_To_Bytes(binary_str: str) -> bytes:
    """Convert a binary string to raw bytes."""
    return convertor.binary_to_bytes(binary_str)
# ----------------------------------------------------------
def Binary_To_Address(binary_str: str, compress: bool = False) -> str:
    """Derive a Bitcoin address (compressed or uncompressed) from binary."""
    return convertor.bytes_to_addr(convertor.binary_to_bytes(binary_str),
                                   True if compress else False)
# ----------------------------------------------------------
def Binary_To_PrivateKey(binary_str: str) -> str:
    """Convert a binary string to a hex private key."""
    return convertor.bytes_to_hex(convertor.binary_to_bytes(binary_str))
# ----------------------------------------------------------
def Binary_To_Mnemonic(binary_str: str) -> str:
    """Convert a binary string to a mnemonic phrase."""
    return convertor.bytes_to_mne(convertor.binary_to_bytes(binary_str))
# ----------------------------------------------------------
def Binary_To_XPRV(binary_str: str) -> str:
    """Convert a binary string to an extended private key (xprv)."""
    return convertor.bytes_to_xprv(convertor.binary_to_bytes(binary_str))
# ----------------------------------------------------------
def Binary_To_XPUB(binary_str: str) -> str:
    """Convert a binary string to an extended public key (xpub)."""
    return convertor.bytes_to_xpub(convertor.binary_to_bytes(binary_str))
# ----------------------------------------------------------
def Binary_To_Wif(binary_str: str, compress: bool = False) -> str:
    """Derive a WIF key (compressed or uncompressed) from a binary string."""
    return convertor.bytes_to_wif(convertor.binary_to_bytes(binary_str), compress)
# ----------------------------------------------------------
def Binary_To_PublicKey(binary_str: str, compress: bool = False) -> str:
    """Derive a hex public key (compressed or uncompressed) from binary."""
    return convertor.bytes_to_public(convertor.binary_to_bytes(binary_str),
                                     compress).hex()
# ----------------------------------------------------------
def Binary_To_Decimal(binary_str: str) -> int:
    """Convert a binary string to its integer value."""
    return convertor.bytes_to_int(convertor.binary_to_bytes(binary_str))
# ----------------------------------------------------------
def XPRV_To_Bytes(xprv: str) -> bytes:
    """Decode an extended private key (xprv) to its raw bytes."""
    return convertor.xprv_to_bytes(xprv)
# ----------------------------------------------------------
def XPRV_To_PrivateKey(xprv: str) -> str:
    """Convert an xprv to a hex private key."""
    return convertor.bytes_to_hex(convertor.xprv_to_bytes(xprv))
# ----------------------------------------------------------
def XPRV_To_PublicKey(xprv: str, compress: bool = False) -> str:
    """Derive a hex public key (compressed or uncompressed) from an xprv."""
    return convertor.bytes_to_public(convertor.xprv_to_bytes(xprv), compress).hex()
# ----------------------------------------------------------
def XPRV_To_Wif(xprv: str, compress: bool = False) -> str:
    """Derive a WIF key (compressed or uncompressed) from an xprv."""
    return convertor.bytes_to_wif(convertor.xprv_to_bytes(xprv), compress)
# ----------------------------------------------------------
def XPRV_To_Address(xprv: str, compress: bool = False) -> str:
    """Derive a Bitcoin address (compressed or uncompressed) from an xprv."""
    return convertor.bytes_to_addr(convertor.xprv_to_bytes(xprv), compress)
# ----------------------------------------------------------
def XPRV_To_Mnemonic(xprv: str) -> str:
    """Convert an xprv to a mnemonic phrase."""
    return convertor.bytes_to_mne(convertor.xprv_to_bytes(xprv))
# ----------------------------------------------------------
def XPRV_To_XPUB(xprv: str) -> str:
    """Convert an xprv to the corresponding extended public key (xpub)."""
    return convertor.bytes_to_xpub(convertor.xprv_to_bytes(xprv))
# ----------------------------------------------------------
def XPRV_To_Decimal(xprv: str) -> int:
    """Convert an xprv to its integer value."""
    return convertor.bytes_to_int(convertor.xprv_to_bytes(xprv))
# ----------------------------------------------------------
def PrivateKey_To_Bitcoin_Addr(privatekey: str, Type: str = 'p2pkh') -> str:
    """Derive a Bitcoin address of the requested script type from a hex key.

    :param privatekey: private key as a hex string.
    :param Type: one of p2pkh, p2sh, p2wpkh, p2wsh, p2wpkh_p2sh, p2wsh_p2sh;
        any unrecognized value falls back to 'p2pkh'.

    >>> from cryptofuzz.Wallet import PrivateKey_To_Bitcoin_Addr
    >>> Privatekey = "e3bfc1c...ca52b8"  # example key
    >>> p2pkh = PrivateKey_To_Bitcoin_Addr(Privatekey, 'p2pkh')
    >>> p2wsh = PrivateKey_To_Bitcoin_Addr(Privatekey, 'p2wsh')
    """
    BTC = Bitcoin()
    supported = ('p2pkh', 'p2sh', 'p2wpkh', 'p2wsh', 'p2wpkh_p2sh', 'p2wsh_p2sh')
    return BTC.hex_addr(privatekey, Type if Type in supported else 'p2pkh')
# ----------------------------------------------------------
def PrivateKey_To_Ethereum_Addr(privatekey: str) -> str:
    """Derive an Ethereum address from a hex private key.

    >>> addr = PrivateKey_To_Ethereum_Addr("e3bfc1c...ca52b8")
    """
    return Ethereum().hex_addr(privatekey)
# ----------------------------------------------------------
def PrivateKey_To_BitcoinGold_Addr(privatekey: str) -> str:
    """Derive a Bitcoin Gold address from a hex private key.

    >>> addr = PrivateKey_To_BitcoinGold_Addr("e3bfc1c...ca52b8")
    """
    return BitcoinGold().hex_addr(privatekey)
# ----------------------------------------------------------
def PrivateKey_To_Dash_Addr(privatekey: str) -> str:
    """Derive a Dash address from a hex private key.

    >>> addr = PrivateKey_To_Dash_Addr("e3bfc1c...ca52b8")
    """
    return Dash().hex_addr(privatekey)
# ----------------------------------------------------------
def PrivateKey_To_DigiByte_Addr(privatekey: str) -> str:
"""
Convert Private Key To Digibyte Address.
:param privatekey:
:type privatekey: str
:returns:
--------------------------------------------------------
>>> from cryptofuzz.Wallet import PrivateKey_To_Digibyte_Addr
>>> Privatekey = "e3bfc1c...ca52b8" # example key
>>> addr = PrivateKey_To_DigiByte_Addr(Privatekey)
--------------------------------------------------------
""" | DGB = DigiByte() | 6 | 2023-11-10 14:51:41+00:00 | 12k |
atlantic-quantum/Shipyard | shipyard/passes/timing_constraints.py | [
{
"identifier": "ActivationRecord",
"path": "shipyard/call_stack.py",
"snippet": "class ActivationRecord:\n \"\"\"Activation Records for shipyard\"\"\"\n\n def __init__(\n self,\n name: str,\n ar_type: ARType,\n nesting_level: int,\n ):\n self.name = name\n ... | from contextlib import contextmanager
from openpulse import ast
from openpulse.printer import dumps
from ..call_stack import ActivationRecord, ARType
from ..compiler_error import Error, ErrorCode
from ..setup.internal import SetupInternal
from .interpreter import Interpreter
import numpy as np | 8,836 | """Check that waveforms meet ZI timing constraints"""
class TimingConstraints(Interpreter):
"""
Analyzes the waveforms played or captured in the program to make sure they meet
the timing constraints of the ZI hardware.
Args:
minimum_length (int | None):
minimum length of the waveform in samples (default: 32)
granularity (int | None):
granularity of the waveform in samples (default: 16)
"""
    def __init__(
        self,
        setup: SetupInternal = None,
        external_funcs: dict = None,
        minimum_length: int = 32,
        granularity: int = 16,
    ) -> None:
        """Store the hardware timing limits and initialize the base Interpreter."""
        # Waveforms must be at least `minimum_length` samples long and a
        # multiple of `granularity` samples (ZI hardware constraints).
        self.minimum_length = minimum_length
        self.granularity = granularity
        # Waveforms that violate the constraints; if non-empty after the
        # program walk, a warning/error message is constructed (see visit_Program).
        self.flagged_wfs = []
        # `setup` and `external_funcs` are handled entirely by the Interpreter base.
        super().__init__(setup=setup, external_funcs=external_funcs)
def check_timing_constraints(self, node, delay_flag=False) -> tuple[bool, int]:
"""
Checks the timing constraints of a waveform
Args:
node
can be various types
Returns:
bool: True if the waveform meets the timing constraints
int: length of the waveform
"""
dur_val = self.visit(node)
if isinstance(dur_val, np.ndarray):
dur_val = len(dur_val)
elif dur_val is None: # should occur during ExecuteTableEntry
return True, -1
return (
dur_val >= self.minimum_length and dur_val % self.granularity == 0
), dur_val
def visit_Program(self, node: ast.Program) -> None:
activation_record = ActivationRecord(
name="main", ar_type=ARType.PROGRAM, nesting_level=1
)
for extern in self.external_funcs:
activation_record[extern] = "external"
with self.ar_context_manager(activation_record):
for statement in node.statements:
self.visit(statement)
if self.flagged_wfs:
total_message = self.construct_warning_message()
| """Check that waveforms meet ZI timing constraints"""
class TimingConstraints(Interpreter):
"""
Analyzes the waveforms played or captured in the program to make sure they meet
the timing constraints of the ZI hardware.
Args:
minimum_length (int | None):
minimum length of the waveform in samples (default: 32)
granularity (int | None):
granularity of the waveform in samples (default: 16)
"""
def __init__(
self,
setup: SetupInternal = None,
external_funcs: dict = None,
minimum_length: int = 32,
granularity: int = 16,
) -> None:
self.minimum_length = minimum_length
self.granularity = granularity
self.flagged_wfs = []
super().__init__(setup=setup, external_funcs=external_funcs)
def check_timing_constraints(self, node, delay_flag=False) -> tuple[bool, int]:
"""
Checks the timing constraints of a waveform
Args:
node
can be various types
Returns:
bool: True if the waveform meets the timing constraints
int: length of the waveform
"""
dur_val = self.visit(node)
if isinstance(dur_val, np.ndarray):
dur_val = len(dur_val)
elif dur_val is None: # should occur during ExecuteTableEntry
return True, -1
return (
dur_val >= self.minimum_length and dur_val % self.granularity == 0
), dur_val
def visit_Program(self, node: ast.Program) -> None:
activation_record = ActivationRecord(
name="main", ar_type=ARType.PROGRAM, nesting_level=1
)
for extern in self.external_funcs:
activation_record[extern] = "external"
with self.ar_context_manager(activation_record):
for statement in node.statements:
self.visit(statement)
if self.flagged_wfs:
total_message = self.construct_warning_message() | raise Error( | 2 | 2023-11-16 17:37:29+00:00 | 12k |
KevinXu02/ControlledDreamGaussian | frankmocap/bodymocap/body_mocap_api.py | [
{
"identifier": "hmr",
"path": "frankmocap/bodymocap/models/hmr.py",
"snippet": "def hmr(smpl_mean_params, pretrained=True, **kwargs):\n \"\"\" Constructs an HMR model with ResNet50 backbone.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n mode... | import cv2
import sys
import torch
import numpy as np
import pickle
import frankmocap.mocap_utils.geometry_utils as gu
from torchvision.transforms import Normalize
from frankmocap.bodymocap.models import hmr, SMPL, SMPLX
from frankmocap.bodymocap import constants
from frankmocap.bodymocap.utils.imutils import crop, crop_bboxInfo, process_image_bbox, process_image_keypoints, \
bbox_from_keypoints
from frankmocap.mocap_utils.coordconv import convert_smpl_to_bbox, convert_bbox_to_oriIm | 7,441 | # Copyright (c) Facebook, Inc. and its affiliates.
class BodyMocap(object):
    def __init__(self, regressor_checkpoint, smpl_dir, device=torch.device('cuda'), use_smplx=False):
        """Load the parametric body model and the pretrained HMR regressor.

        Args:
            regressor_checkpoint: path to the HMR regressor checkpoint
                (must contain a 'model' state dict).
            smpl_dir: directory holding the SMPL/SMPL-X model files.
            device: NOTE(review) — this parameter is ignored; the device is
                recomputed from torch.cuda.is_available() on the next line.
            use_smplx: load SMPL-X instead of SMPL.
        """
        self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
        # Load parametric model (SMPLX or SMPL)
        if use_smplx:
            # NOTE(review): smplModelPath is assigned but unused in this branch;
            # SMPLX is constructed from the directory instead.
            smplModelPath = smpl_dir + '/SMPLX_NEUTRAL.pkl'
            self.smpl = SMPLX(smpl_dir,
                              batch_size=1,
                              num_betas=10,
                              use_pca=False,
                              create_transl=False).to(self.device)
            self.use_smplx = True
        else:
            smplModelPath = smpl_dir + '/basicModel_neutral_lbs_10_207_0_v1.0.0.pkl'
            self.smpl = SMPL(smplModelPath, batch_size=1, create_transl=False).to(self.device)
            self.use_smplx = False
        # Load pre-trained neural network (HMR regressor seeded with SPIN mean params)
        SMPL_MEAN_PARAMS = './frankmocap/extra_data/body_module/data_from_spin/smpl_mean_params.npz'
        self.model_regressor = hmr(SMPL_MEAN_PARAMS).to(self.device)
        checkpoint = torch.load(regressor_checkpoint)
        # strict=False: tolerate missing/extra keys in the checkpoint.
        self.model_regressor.load_state_dict(checkpoint['model'], strict=False)
        self.model_regressor.eval()
def regress(self, img_original, body_bbox_list):
"""
args:
img_original: original raw image (BGR order by using cv2.imread)
body_bbox: bounding box around the target: (minX, minY, width, height)
outputs:
pred_vertices_img:
pred_joints_vis_img:
pred_rotmat
pred_betas
pred_camera
bbox: [bbr[0], bbr[1],bbr[0]+bbr[2], bbr[1]+bbr[3]])
bboxTopLeft: bbox top left (redundant)
boxScale_o2n: bbox scaling factor (redundant)
"""
pred_output_list = list()
for body_bbox in body_bbox_list:
img, norm_img, boxScale_o2n, bboxTopLeft, bbox = process_image_bbox(
img_original, body_bbox, input_res=constants.IMG_RES)
bboxTopLeft = np.array(bboxTopLeft)
# bboxTopLeft = bbox['bboxXYWH'][:2]
if img is None:
pred_output_list.append(None)
continue
with torch.no_grad():
# model forward
pred_rotmat, pred_betas, pred_camera = self.model_regressor(norm_img.to(self.device))
# Convert rot_mat to aa since hands are always in aa
# pred_aa = rotmat3x3_to_angle_axis(pred_rotmat)
pred_aa = gu.rotation_matrix_to_angle_axis(pred_rotmat).cuda()
pred_aa = pred_aa.reshape(pred_aa.shape[0], 72)
# remove global rotation
pred_aa[:, :3] = 0
smpl_output = self.smpl(
betas=pred_betas,
body_pose=pred_aa[:, 3:],
global_orient=pred_aa[:, :3],
pose2rot=True)
pred_vertices = smpl_output.vertices
pred_joints_3d = smpl_output.joints
pred_vertices = pred_vertices[0].cpu().numpy()
pred_camera = pred_camera.cpu().numpy().ravel()
camScale = pred_camera[0] # *1.15
camTrans = pred_camera[1:]
pred_output = dict()
# Convert mesh to original image space (X,Y are aligned to image)
# 1. SMPL -> 2D bbox
# 2. 2D bbox -> original 2D image
| # Copyright (c) Facebook, Inc. and its affiliates.
class BodyMocap(object):
def __init__(self, regressor_checkpoint, smpl_dir, device=torch.device('cuda'), use_smplx=False):
self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# Load parametric model (SMPLX or SMPL)
if use_smplx:
smplModelPath = smpl_dir + '/SMPLX_NEUTRAL.pkl'
self.smpl = SMPLX(smpl_dir,
batch_size=1,
num_betas=10,
use_pca=False,
create_transl=False).to(self.device)
self.use_smplx = True
else:
smplModelPath = smpl_dir + '/basicModel_neutral_lbs_10_207_0_v1.0.0.pkl'
self.smpl = SMPL(smplModelPath, batch_size=1, create_transl=False).to(self.device)
self.use_smplx = False
# Load pre-trained neural network
SMPL_MEAN_PARAMS = './frankmocap/extra_data/body_module/data_from_spin/smpl_mean_params.npz'
self.model_regressor = hmr(SMPL_MEAN_PARAMS).to(self.device)
checkpoint = torch.load(regressor_checkpoint)
self.model_regressor.load_state_dict(checkpoint['model'], strict=False)
self.model_regressor.eval()
def regress(self, img_original, body_bbox_list):
"""
args:
img_original: original raw image (BGR order by using cv2.imread)
body_bbox: bounding box around the target: (minX, minY, width, height)
outputs:
pred_vertices_img:
pred_joints_vis_img:
pred_rotmat
pred_betas
pred_camera
bbox: [bbr[0], bbr[1],bbr[0]+bbr[2], bbr[1]+bbr[3]])
bboxTopLeft: bbox top left (redundant)
boxScale_o2n: bbox scaling factor (redundant)
"""
pred_output_list = list()
for body_bbox in body_bbox_list:
img, norm_img, boxScale_o2n, bboxTopLeft, bbox = process_image_bbox(
img_original, body_bbox, input_res=constants.IMG_RES)
bboxTopLeft = np.array(bboxTopLeft)
# bboxTopLeft = bbox['bboxXYWH'][:2]
if img is None:
pred_output_list.append(None)
continue
with torch.no_grad():
# model forward
pred_rotmat, pred_betas, pred_camera = self.model_regressor(norm_img.to(self.device))
# Convert rot_mat to aa since hands are always in aa
# pred_aa = rotmat3x3_to_angle_axis(pred_rotmat)
pred_aa = gu.rotation_matrix_to_angle_axis(pred_rotmat).cuda()
pred_aa = pred_aa.reshape(pred_aa.shape[0], 72)
# remove global rotation
pred_aa[:, :3] = 0
smpl_output = self.smpl(
betas=pred_betas,
body_pose=pred_aa[:, 3:],
global_orient=pred_aa[:, :3],
pose2rot=True)
pred_vertices = smpl_output.vertices
pred_joints_3d = smpl_output.joints
pred_vertices = pred_vertices[0].cpu().numpy()
pred_camera = pred_camera.cpu().numpy().ravel()
camScale = pred_camera[0] # *1.15
camTrans = pred_camera[1:]
pred_output = dict()
# Convert mesh to original image space (X,Y are aligned to image)
# 1. SMPL -> 2D bbox
# 2. 2D bbox -> original 2D image | pred_vertices_bbox = convert_smpl_to_bbox(pred_vertices, camScale, camTrans) | 9 | 2023-11-17 05:21:26+00:00 | 12k |
dazhangyu123/OCL | train_source.py | [
{
"identifier": "Eval",
"path": "utils/eval.py",
"snippet": "class Eval():\n def __init__(self, num_class):\n self.num_class = num_class\n self.confusion_matrix = np.zeros((self.num_class,)*2)\n self.ignore_index = None\n self.synthia = True if num_class == 16 else False\n... | import os
import random
import logging
import argparse
import torch
import torch.nn as nn
import torch.utils.data as data
import torch.nn.functional as F
import numpy as np
import sys
import shutil
from tqdm import tqdm
from math import ceil
from distutils.version import LooseVersion
from tensorboardX import SummaryWriter
from torchvision.utils import make_grid
from utils.eval import Eval
from utils.train_helper import get_model
from datasets.cityscapes_Dataset import City_Dataset, City_DataLoader, inv_preprocess, decode_labels
from datasets.gta5_Dataset import GTA5_DataLoader
from datasets.synthia_Dataset import SYNTHIA_DataLoader | 8,602 | # validate
PA, MPA, MIoU, FWIoU = self.validate()
self.writer.add_scalar('PA', PA, self.current_epoch)
self.writer.add_scalar('MPA', MPA, self.current_epoch)
self.writer.add_scalar('MIoU', MIoU, self.current_epoch)
self.writer.add_scalar('FWIoU', FWIoU, self.current_epoch)
self.current_MIoU = MIoU
is_best = MIoU > self.best_MIou
if is_best:
self.best_MIou = MIoU
self.best_iter = self.current_iter
self.logger.info("=>saving a new best checkpoint...")
self.save_checkpoint(self.train_id+'best.pth')
else:
self.logger.info("=> The MIoU of val does't improve.")
self.logger.info("=> The best MIoU of val is {} at {}".format(self.best_MIou, self.best_iter))
self.current_epoch += 1
state = {
'epoch': self.current_epoch + 1,
'iteration': self.current_iter,
'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'best_MIou': self.current_MIoU
}
self.logger.info("=>best_MIou {} at {}".format(self.best_MIou, self.best_iter))
self.logger.info("=>saving the final checkpoint to " + os.path.join(self.args.checkpoint_dir, self.train_id+'final.pth'))
self.save_checkpoint(self.train_id+'final.pth')
def train_one_epoch(self):
tqdm_epoch = tqdm(self.dataloader.data_loader, total=self.dataloader.num_iterations,
desc="Train Epoch-{}-total-{}".format(self.current_epoch+1, self.epoch_num))
self.logger.info("Training one epoch...")
self.Eval.reset()
train_loss = []
loss_seg_value_2 = 0
iter_num = self.dataloader.num_iterations
if self.args.freeze_bn:
self.model.eval()
self.logger.info("freeze bacth normalization successfully!")
else:
self.model.train()
# Initialize your average meters
batch_idx = 0
for x, y, _ in tqdm_epoch:
self.poly_lr_scheduler(
optimizer=self.optimizer,
init_lr=self.args.lr,
iter=self.current_iter,
max_iter=self.args.iter_max,
power=self.args.poly_power,
)
if self.args.iter_stop is not None and self.current_iter >= self.args.iter_stop:
self.logger.info("iteration arrive {}(early stop)/{}(total step)!".format(self.args.iter_stop, self.args.iter_max))
break
if self.current_iter >= self.args.iter_max:
self.logger.info("iteration arrive {}!".format(self.args.iter_max))
break
self.writer.add_scalar('learning_rate', self.optimizer.param_groups[0]["lr"], self.current_iter)
if self.cuda:
x, y = x.to(self.device), y.to(device=self.device, dtype=torch.long)
y = torch.squeeze(y, 1)
self.optimizer.zero_grad()
# model
pred = self.model(x)
if isinstance(pred, tuple):
pred_2 = pred[1]
pred = pred[0]
pred = F.interpolate(pred, size=x.size()[2:], mode='bilinear', align_corners=True)
# loss
cur_loss = self.loss(pred, y)
if self.args.multi:
loss_2 = self.args.lambda_seg * self.loss(pred_2, y)
cur_loss += loss_2
loss_seg_value_2 += loss_2.cpu().item() / iter_num
# optimizer
cur_loss.backward()
self.optimizer.step()
train_loss.append(cur_loss.item())
if batch_idx % 1000 == 0:
if self.args.multi:
self.logger.info("The train loss of epoch{}-batch-{}:{};{}".format(self.current_epoch,
batch_idx, cur_loss.item(), loss_2.item()))
else:
self.logger.info("The train loss of epoch{}-batch-{}:{}".format(self.current_epoch,
batch_idx, cur_loss.item()))
batch_idx += 1
self.current_iter += 1
if np.isnan(float(cur_loss.item())):
raise ValueError('Loss is nan during training...')
pred = pred.data.cpu().numpy()
label = y.cpu().numpy()
argpred = np.argmax(pred, axis=1)
self.Eval.add_batch(label, argpred)
if batch_idx==self.dataloader.num_iterations:
break
self.log_one_train_epoch(x, label, argpred, train_loss)
tqdm_epoch.close()
def log_one_train_epoch(self, x, label, argpred, train_loss):
#show train image on tensorboard
images_inv = inv_preprocess(x.clone().cpu(), self.args.show_num_images, numpy_transform=self.args.numpy_transform)
|
# Make the local `tools` package importable.
sys.path.append(os.path.abspath('tools'))

# Per-dataset locations of images/labels and split lists.
# NOTE(review): the /mnt/Xsky/... entries are machine-specific absolute paths
# and will need editing on any other host.
datasets_path={
    'cityscapes': {'data_root_path': '/mnt/Xsky/zyl/dataset/dataset/Cityscapes', 'list_path': './datasets/city_list',
                    'image_path':'/mnt/Xsky/zyl/dataset/Cityscapes/leftImg8bit',
                    'gt_path': './datasets/Cityscapes/gtFine'},
    'gta5': {'data_root_path': '/mnt/Xsky/zyl/dataset/GTA5', 'list_path': './datasets/gta5_list',
             'image_path':'/mnt/Xsky/zyl/dataset/GTA5/images',
             'gt_path': './datasets/GTA5/labels'},
    'synthia': {'data_root_path': '/mnt/Xsky/zyl/dataset/RAND_CITYSCAPES', 'list_path': './datasets/synthia_list',
                'image_path':'/mnt/Xsky/zyl/dataset/RAND_CITYSCAPES/RGB',
                'gt_path': './datasets/SYNTHIA/GT/LABELS'},
    'NTHU': {'data_root_path': './datasets/NTHU_Datasets', 'list_path': './datasets/NTHU_list'}
    }
def str2bool(v):
    """Parse a human-friendly truthy/falsy string (for use as an argparse type)."""
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Unsupported value encountered.')
ITER_MAX = 5000
class Trainer():
    def __init__(self, args, cuda=None, train_id="None", logger=None):
        """Build the training state: metrics, loss, model, optimizer, dataloader.

        Args:
            args: parsed experiment arguments (gpu, optim, dataset, lr, ...).
            cuda: request GPU usage; only honoured if CUDA is available.
            train_id: prefix for checkpoint file names.
            logger: logger used for all progress messages.
        """
        self.args = args
        os.environ["CUDA_VISIBLE_DEVICES"] = self.args.gpu
        # GPU only when both requested and actually available.
        self.cuda = cuda and torch.cuda.is_available()
        self.device = torch.device('cuda' if self.cuda else 'cpu')
        self.train_id = train_id
        self.logger = logger
        # Bookkeeping for best/current validation mIoU and training progress.
        self.current_MIoU = 0
        self.best_MIou = 0
        self.best_source_MIou = 0
        self.current_epoch = 0
        self.current_iter = 0
        self.second_best_MIou = 0
        # set TensorboardX
        self.writer = SummaryWriter(self.args.checkpoint_dir)
        # Metric definition
        self.Eval = Eval(self.args.num_classes)
        # loss definition (label -1 is ignored)
        self.loss = nn.CrossEntropyLoss(weight=None, ignore_index= -1)
        self.loss.to(self.device)
        # model
        self.model, params = get_model(self.args)
        # NOTE(review): device_ids=[0] hard-codes single-GPU DataParallel.
        self.model = nn.DataParallel(self.model, device_ids=[0])
        self.model.to(self.device)
        # NOTE(review): SGD is built without an explicit lr — assumes `params`
        # are param groups from get_model that carry their own lr; TODO confirm.
        if self.args.optim == "SGD":
            self.optimizer = torch.optim.SGD(
                params=params,
                momentum=self.args.momentum,
                weight_decay=self.args.weight_decay
            )
        elif self.args.optim == "Adam":
            self.optimizer = torch.optim.Adam(params, betas=(0.9, 0.99), weight_decay=self.args.weight_decay)
        # dataloader — any dataset name other than cityscapes/gta5 falls back to SYNTHIA
        if self.args.dataset=="cityscapes":
            self.dataloader = City_DataLoader(self.args)
        elif self.args.dataset=="gta5":
            self.dataloader = GTA5_DataLoader(self.args)
        else:
            self.dataloader = SYNTHIA_DataLoader(self.args)
        # Cap iterations per epoch at ITER_MAX, then derive the epoch count
        # from the global iteration budget (iter_max, or iter_stop if set).
        self.dataloader.num_iterations = min(self.dataloader.num_iterations, ITER_MAX)
        print(self.args.iter_max, self.dataloader.num_iterations)
        self.epoch_num = ceil(self.args.iter_max / self.dataloader.num_iterations) if self.args.iter_stop is None else \
            ceil(self.args.iter_stop / self.dataloader.num_iterations)
    def main(self):
        """Entry point: log the config, optionally restore a checkpoint, then train."""
        # display args details
        self.logger.info("Global configuration as follows:")
        for key, val in vars(self.args).items():
            self.logger.info("{:16} {}".format(key, val))
        # choose cuda
        if self.cuda:
            current_device = torch.cuda.current_device()
            self.logger.info("This model will run on {}".format(torch.cuda.get_device_name(current_device)))
        else:
            self.logger.info("This model will run on CPU")
        # load pretrained checkpoint; a directory path is resolved to the
        # '<train_id>best.pth' file inside checkpoint_dir.
        if self.args.pretrained_ckpt_file is not None:
            if os.path.isdir(self.args.pretrained_ckpt_file):
                self.args.pretrained_ckpt_file = os.path.join(self.args.checkpoint_dir, self.train_id + 'best.pth')
            self.load_checkpoint(self.args.pretrained_ckpt_file)
        # Resume: reload best checkpoint and carry over the iteration counters.
        if self.args.continue_training:
            self.load_checkpoint(os.path.join(self.args.checkpoint_dir, self.train_id + 'best.pth'))
            self.best_iter = self.current_iter
            self.best_source_iter = self.current_iter
        else:
            self.current_epoch = 0
        # train
        self.train()
        self.writer.close()
    def train(self):
        """Run the epoch loop: train one epoch, validate, and checkpoint the best model."""
        # self.validate() # check image summary
        for epoch in tqdm(range(self.current_epoch, self.epoch_num),
                          desc="Total {} epochs".format(self.epoch_num)):
            self.train_one_epoch()
            # validate
            PA, MPA, MIoU, FWIoU = self.validate()
            self.writer.add_scalar('PA', PA, self.current_epoch)
            self.writer.add_scalar('MPA', MPA, self.current_epoch)
            self.writer.add_scalar('MIoU', MIoU, self.current_epoch)
            self.writer.add_scalar('FWIoU', FWIoU, self.current_epoch)
            self.current_MIoU = MIoU
            is_best = MIoU > self.best_MIou
            if is_best:
                self.best_MIou = MIoU
                self.best_iter = self.current_iter
                self.logger.info("=>saving a new best checkpoint...")
                self.save_checkpoint(self.train_id+'best.pth')
            else:
                self.logger.info("=> The MIoU of val does't improve.")
                self.logger.info("=> The best MIoU of val is {} at {}".format(self.best_MIou, self.best_iter))
            self.current_epoch += 1
        # NOTE(review): `state` is assembled here but never passed to
        # save_checkpoint — presumably save_checkpoint rebuilds the same dict
        # itself; otherwise this is dead code. TODO confirm.
        state = {
            'epoch': self.current_epoch + 1,
            'iteration': self.current_iter,
            'state_dict': self.model.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'best_MIou': self.current_MIoU
        }
        self.logger.info("=>best_MIou {} at {}".format(self.best_MIou, self.best_iter))
        self.logger.info("=>saving the final checkpoint to " + os.path.join(self.args.checkpoint_dir, self.train_id+'final.pth'))
        self.save_checkpoint(self.train_id+'final.pth')
    def train_one_epoch(self):
        """Train for one epoch: poly-LR step, forward, CE loss (optionally multi-head), backward, metrics."""
        tqdm_epoch = tqdm(self.dataloader.data_loader, total=self.dataloader.num_iterations,
                          desc="Train Epoch-{}-total-{}".format(self.current_epoch+1, self.epoch_num))
        self.logger.info("Training one epoch...")
        self.Eval.reset()
        train_loss = []
        loss_seg_value_2 = 0
        iter_num = self.dataloader.num_iterations
        # Optionally keep BatchNorm statistics frozen during training.
        if self.args.freeze_bn:
            self.model.eval()
            self.logger.info("freeze bacth normalization successfully!")
        else:
            self.model.train()
        # Initialize your average meters
        batch_idx = 0
        for x, y, _ in tqdm_epoch:
            # Polynomial learning-rate decay, stepped every iteration.
            self.poly_lr_scheduler(
                optimizer=self.optimizer,
                init_lr=self.args.lr,
                iter=self.current_iter,
                max_iter=self.args.iter_max,
                power=self.args.poly_power,
            )
            # Stop at the early-stop iteration or at the global budget.
            if self.args.iter_stop is not None and self.current_iter >= self.args.iter_stop:
                self.logger.info("iteration arrive {}(early stop)/{}(total step)!".format(self.args.iter_stop, self.args.iter_max))
                break
            if self.current_iter >= self.args.iter_max:
                self.logger.info("iteration arrive {}!".format(self.args.iter_max))
                break
            self.writer.add_scalar('learning_rate', self.optimizer.param_groups[0]["lr"], self.current_iter)
            if self.cuda:
                x, y = x.to(self.device), y.to(device=self.device, dtype=torch.long)
            y = torch.squeeze(y, 1)
            self.optimizer.zero_grad()
            # model — may return a (main, auxiliary) prediction tuple.
            pred = self.model(x)
            if isinstance(pred, tuple):
                pred_2 = pred[1]
                pred = pred[0]
            # Upsample logits back to the input resolution.
            pred = F.interpolate(pred, size=x.size()[2:], mode='bilinear', align_corners=True)
            # loss
            cur_loss = self.loss(pred, y)
            # NOTE(review): if args.multi is set but the model returned a single
            # tensor, `pred_2` is unbound here and this raises NameError — the
            # flag implicitly assumes a two-headed model. TODO confirm.
            if self.args.multi:
                loss_2 = self.args.lambda_seg * self.loss(pred_2, y)
                cur_loss += loss_2
                loss_seg_value_2 += loss_2.cpu().item() / iter_num
            # optimizer
            cur_loss.backward()
            self.optimizer.step()
            train_loss.append(cur_loss.item())
            if batch_idx % 1000 == 0:
                if self.args.multi:
                    self.logger.info("The train loss of epoch{}-batch-{}:{};{}".format(self.current_epoch,
                                                                                batch_idx, cur_loss.item(), loss_2.item()))
                else:
                    self.logger.info("The train loss of epoch{}-batch-{}:{}".format(self.current_epoch,
                                                                                batch_idx, cur_loss.item()))
            batch_idx += 1
            self.current_iter += 1
            if np.isnan(float(cur_loss.item())):
                raise ValueError('Loss is nan during training...')
            # Accumulate confusion-matrix statistics for epoch metrics.
            pred = pred.data.cpu().numpy()
            label = y.cpu().numpy()
            argpred = np.argmax(pred, axis=1)
            self.Eval.add_batch(label, argpred)
            if batch_idx==self.dataloader.num_iterations:
                break
        # NOTE(review): x/label/argpred are loop-carried — an empty dataloader
        # would raise NameError here.
        self.log_one_train_epoch(x, label, argpred, train_loss)
        tqdm_epoch.close()
def log_one_train_epoch(self, x, label, argpred, train_loss):
#show train image on tensorboard
images_inv = inv_preprocess(x.clone().cpu(), self.args.show_num_images, numpy_transform=self.args.numpy_transform) | labels_colors = decode_labels(label, self.args.show_num_images) | 5 | 2023-11-14 02:01:11+00:00 | 12k |
raphaelreme/koft | src/experiments/track.py | [
{
"identifier": "FakeDetector",
"path": "src/detector.py",
"snippet": "class FakeDetector(byotrack.Detector): # TODO: include weight\n def __init__(self, mu: torch.Tensor, noise=1.0, fpr=0.1, fnr=0.2, generate_outside_particles=True):\n self.noise = noise\n self.fpr = fpr\n self... | import dataclasses
import enum
import pathlib
import dacite
import torch
import tqdm # type: ignore
import yaml # type: ignore
import byotrack
from typing import Collection, List
from byotrack.implementation.detector.wavelet import WaveletDetector
from byotrack.implementation.linker.icy_emht import EMHTParameters, IcyEMHTLinker, Motion
from byotrack.implementation.linker.trackmate.trackmate import TrackMateLinker, TrackMateParameters
from byotrack.implementation.refiner.interpolater import ForwardBackwardInterpolater
from ..detector import FakeDetector
from ..metrics.detections import DetectionMetric
from ..metrics.tracking import compute_tracking_metrics
from ..skt import constant_kalman_filter, Dist, Method, MatchingConfig, SimpleKalmanTracker, PartialTrack
from ..koft import constant_koft_filter, OptFlowExtraction, SingleUpdateKOFTracker, TwoUpdateKOFTracker
from ..optical_flow import farneback
from ..utils import enforce_all_seeds | 10,339 | matching_method: Method
always_update_velocities: bool = True
dim: int = 2
order: int = 1
class TrackingMethod(enum.Enum):
SKT = "skt"
KOFT = "koft"
KOFTmm = "koft--"
KOFTpp = "koft++"
TRACKMATE = "trackmate"
TRACKMATE_KF = "trackmate-kf"
EMHT = "emht"
@dataclasses.dataclass
class ExperimentConfig:
seed: int
simulation_path: pathlib.Path
tracking_method: TrackingMethod
detection: DetectionConfig
kalman: KalmanConfig
icy_path: pathlib.Path
fiji_path: pathlib.Path
def create_linker(self, thresh: float) -> byotrack.Linker:
"""Create a linker"""
if self.tracking_method is TrackingMethod.EMHT:
return IcyEMHTLinker(
self.icy_path,
EMHTParameters(
gate_factor=thresh,
motion=Motion.MULTI,
tree_depth=2,
),
)
if self.tracking_method in (TrackingMethod.TRACKMATE, TrackingMethod.TRACKMATE_KF):
# As kalman tracking we let a gap of 2 consecutive miss detections
# In that case, we allow 1.5 thresh
return TrackMateLinker(
self.fiji_path,
TrackMateParameters(
max_frame_gap=PartialTrack.MAX_NON_MEASURE,
linking_max_distance=thresh,
gap_closing_max_distance=thresh * 1.5,
kalman_search_radius=thresh if self.tracking_method is TrackingMethod.TRACKMATE_KF else None,
),
)
if self.tracking_method is TrackingMethod.SKT:
kalman_filter = constant_kalman_filter(
torch.tensor(self.kalman.detection_noise),
torch.tensor(self.kalman.process_noise),
self.kalman.dim,
self.kalman.order,
)
return SimpleKalmanTracker(
kalman_filter, MatchingConfig(thresh, self.kalman.dist, self.kalman.matching_method)
)
# self.tracking_method is TrackingMethod.KOFT:
kalman_filter = constant_koft_filter(
torch.tensor(self.kalman.detection_noise),
torch.tensor(self.kalman.of_noise),
torch.tensor(self.kalman.process_noise),
self.kalman.dim,
self.kalman.order,
)
if self.tracking_method is TrackingMethod.KOFTmm:
return SingleUpdateKOFTracker(
kalman_filter, farneback, MatchingConfig(thresh, self.kalman.dist, self.kalman.matching_method)
)
# <=> two updates, without updating vel for all tracks and using OptFlowExtraction at Detected pos
# return TwoUpdateKOFTracker(
# kalman_filter,
# farneback,
# MatchingConfig(thresh, self.kalman.dist, self.kalman.matching_method),
# OptFlowExtraction.DETECTED,
# False,
# )
PartialTrack.MAX_NON_MEASURE = 5 if self.tracking_method is TrackingMethod.KOFTpp else 3
return TwoUpdateKOFTracker(
kalman_filter,
farneback,
MatchingConfig(thresh, self.kalman.dist, self.kalman.matching_method),
OptFlowExtraction.POSTERIOR,
self.kalman.always_update_velocities,
)
def create_thresholds(self) -> List[float]:
if self.tracking_method is TrackingMethod.EMHT:
# XXX: EMHT struggle to converge in some scenarios with high frp and fnr.
# On those where it converges 3.0 is the best, and it converges for 3.0 in all of them
# So lets manually select [3.0] in high fpr/fnr. In other cases, let's keep the default grid search
# return [3.0]
return [3.0, 4.0, 5.0, 6.0] # MAHA
if (
self.tracking_method in (TrackingMethod.TRACKMATE, TrackingMethod.TRACKMATE_KF)
or self.kalman.dist is Dist.EUCLIDIAN
):
return [3.0, 5.0, 7.0, 10.0, 15.0]
if self.kalman.dist is Dist.MAHALANOBIS:
return [0.5, 1.0, 2.0, 3.0, 4.0]
# self.dist is Dist.LIKELIHOOD:
return [1e-6, 1e-5, 1e-4, 1e-3, 1e-2]
def main(name: str, cfg_data: dict) -> None:
print("Running:", name)
print(yaml.dump(cfg_data))
cfg = dacite.from_dict(ExperimentConfig, cfg_data, dacite.Config(cast=[pathlib.Path, tuple, enum.Enum]))
|
class DetectionMethod(enum.Enum):
    """Which detector to run: wavelet-based on the video, or simulated from ground truth."""
    WAVELET = "wavelet"
    FAKE = "fake"
@dataclasses.dataclass
class WaveletConfig:
    """Parameters of the wavelet spot detector (see WaveletDetector)."""
    k: float = 3.0  # detection threshold factor
    scale: int = 1  # wavelet scale
    min_area: float = 10.0  # minimum spot area kept
@dataclasses.dataclass
class FakeConfig:
    """Parameters of the simulated detector built from ground-truth positions."""
    fpr: float = 0.1  # Bad detection rate
    fnr: float = 0.2  # Miss detection rate
    measurement_noise: float = 1.0  # std of the positional noise added to true positions
@dataclasses.dataclass
class DetectionConfig:
    """Select and parametrize the detector used in the experiment."""

    detector: DetectionMethod
    wavelet: WaveletConfig
    fake: FakeConfig
    # interactive = False  # Could tweak the detector parameters interactively ?

    def create_detector(self, mu: torch.Tensor) -> byotrack.Detector:
        """Build the configured detector; `mu` seeds the simulated one."""
        if self.detector is DetectionMethod.WAVELET:
            cfg = self.wavelet
            return WaveletDetector(cfg.scale, cfg.k, cfg.min_area)
        cfg = self.fake
        return FakeDetector(mu, cfg.measurement_noise, cfg.fpr, cfg.fnr)
@dataclasses.dataclass
class KalmanConfig:
    """Noise levels and matching settings for the Kalman-based trackers."""
    detection_noise: float  # std of the position measurement noise
    of_noise: float  # std of the optical-flow (velocity) measurement noise
    process_noise: float  # Miss evaluation of the process
    dist: Dist  # distance used for track/detection association
    matching_method: Method
    always_update_velocities: bool = True  # used by the two-update KOFT tracker
    dim: int = 2  # spatial dimension
    order: int = 1  # motion-model order (constant velocity)
class TrackingMethod(enum.Enum):
    """Linker/tracker implementation benchmarked by the experiment."""
    SKT = "skt"
    KOFT = "koft"
    KOFTmm = "koft--"
    KOFTpp = "koft++"
    TRACKMATE = "trackmate"
    TRACKMATE_KF = "trackmate-kf"
    EMHT = "emht"
@dataclasses.dataclass
class ExperimentConfig:
    """Full tracking-experiment configuration: detection, tracker and tool paths."""
    seed: int
    simulation_path: pathlib.Path
    tracking_method: TrackingMethod
    detection: DetectionConfig
    kalman: KalmanConfig
    icy_path: pathlib.Path  # Icy install, needed for EMHT
    fiji_path: pathlib.Path  # Fiji install, needed for TrackMate
    def create_linker(self, thresh: float) -> byotrack.Linker:
        """Create the linker selected by `tracking_method`.

        `thresh` is the association gate/threshold, interpreted per method
        (gate factor for EMHT, max linking distance for TrackMate, matching
        threshold for the Kalman trackers).
        """
        if self.tracking_method is TrackingMethod.EMHT:
            return IcyEMHTLinker(
                self.icy_path,
                EMHTParameters(
                    gate_factor=thresh,
                    motion=Motion.MULTI,
                    tree_depth=2,
                ),
            )
        if self.tracking_method in (TrackingMethod.TRACKMATE, TrackingMethod.TRACKMATE_KF):
            # As kalman tracking we let a gap of 2 consecutive miss detections
            # In that case, we allow 1.5 thresh
            return TrackMateLinker(
                self.fiji_path,
                TrackMateParameters(
                    max_frame_gap=PartialTrack.MAX_NON_MEASURE,
                    linking_max_distance=thresh,
                    gap_closing_max_distance=thresh * 1.5,
                    kalman_search_radius=thresh if self.tracking_method is TrackingMethod.TRACKMATE_KF else None,
                ),
            )
        if self.tracking_method is TrackingMethod.SKT:
            kalman_filter = constant_kalman_filter(
                torch.tensor(self.kalman.detection_noise),
                torch.tensor(self.kalman.process_noise),
                self.kalman.dim,
                self.kalman.order,
            )
            return SimpleKalmanTracker(
                kalman_filter, MatchingConfig(thresh, self.kalman.dist, self.kalman.matching_method)
            )
        # self.tracking_method is TrackingMethod.KOFT: (one of the KOFT variants from here on)
        kalman_filter = constant_koft_filter(
            torch.tensor(self.kalman.detection_noise),
            torch.tensor(self.kalman.of_noise),
            torch.tensor(self.kalman.process_noise),
            self.kalman.dim,
            self.kalman.order,
        )
        if self.tracking_method is TrackingMethod.KOFTmm:
            return SingleUpdateKOFTracker(
                kalman_filter, farneback, MatchingConfig(thresh, self.kalman.dist, self.kalman.matching_method)
            )
        # <=> two updates, without updating vel for all tracks and using OptFlowExtraction at Detected pos
        # return TwoUpdateKOFTracker(
        #     kalman_filter,
        #     farneback,
        #     MatchingConfig(thresh, self.kalman.dist, self.kalman.matching_method),
        #     OptFlowExtraction.DETECTED,
        #     False,
        # )
        # NOTE(review): this mutates the PartialTrack *class* attribute, a
        # global side effect affecting every tracker built afterwards.
        PartialTrack.MAX_NON_MEASURE = 5 if self.tracking_method is TrackingMethod.KOFTpp else 3
        return TwoUpdateKOFTracker(
            kalman_filter,
            farneback,
            MatchingConfig(thresh, self.kalman.dist, self.kalman.matching_method),
            OptFlowExtraction.POSTERIOR,
            self.kalman.always_update_velocities,
        )
    def create_thresholds(self) -> List[float]:
        """Grid of association thresholds to search, chosen per method/distance."""
        if self.tracking_method is TrackingMethod.EMHT:
            # XXX: EMHT struggle to converge in some scenarios with high frp and fnr.
            # On those where it converges 3.0 is the best, and it converges for 3.0 in all of them
            # So lets manually select [3.0] in high fpr/fnr. In other cases, let's keep the default grid search
            # return [3.0]
            return [3.0, 4.0, 5.0, 6.0]  # MAHA
        if (
            self.tracking_method in (TrackingMethod.TRACKMATE, TrackingMethod.TRACKMATE_KF)
            or self.kalman.dist is Dist.EUCLIDIAN
        ):
            return [3.0, 5.0, 7.0, 10.0, 15.0]
        if self.kalman.dist is Dist.MAHALANOBIS:
            return [0.5, 1.0, 2.0, 3.0, 4.0]
        # self.dist is Dist.LIKELIHOOD:
        return [1e-6, 1e-5, 1e-4, 1e-3, 1e-2]
def main(name: str, cfg_data: dict) -> None:
print("Running:", name)
print(yaml.dump(cfg_data))
cfg = dacite.from_dict(ExperimentConfig, cfg_data, dacite.Config(cast=[pathlib.Path, tuple, enum.Enum]))
| enforce_all_seeds(cfg.seed) | 14 | 2023-11-10 10:18:39+00:00 | 12k |
david9dragon9/LOMOLite | lomo/lomo_base.py | [
{
"identifier": "LOMO",
"path": "lomo/lomo_orig.py",
"snippet": "class LOMO(Optimizer):\n \"\"\"\n 一个自定义的优化器类LOMO,用于在分布式训练中的梯度更新。\n\n 该类实现两个梯度更新函数 :meth:`fuse_update` 和 :meth:`fuse_update_zero3`,分别用于非ZeRO和ZeRO模式下的梯度更新。\n\n :param model: 待优化的模型\n :param lr: 学习率,默认值为1e-3\n :param clip_gr... | import torch
import sys
import os
import tqdm
import deepspeed
import deepspeed
import os
from transformers.deepspeed import HfDeepSpeedConfig
from transformers import AutoConfig
from collections import OrderedDict
from lomo.lomo_orig import LOMO
from lomo.adalomo_orig import AdaLomo
from lomo.lomo_utils import LearningRateScheduler, DynamicLossScaler
from deepspeed import comm as dist
from deepspeed.accelerator import get_accelerator | 7,257 | # Source: https://github.com/OpenLMLab/LOMO
# Source: https://github.com/OpenLMLab/collie/tree/dev/collie
try:
except:
pass
def setup_lomo(model_name_or_path):
torch.set_default_dtype(torch.float16)
ds_config = __file__.replace("lomo_base.py", "ds_config.json")
dschf = HfDeepSpeedConfig(ds_config)
config = AutoConfig.from_pretrained(model_name_or_path)
config.gradient_checkpointing = True
return config
def create_lomo_lr_scheduler(
learning_rate=0.03,
n_steps=1000,
num_train_epochs=10,
warmup=0.1,
lr_scheduler_type="linear",
):
| # Source: https://github.com/OpenLMLab/LOMO
# Source: https://github.com/OpenLMLab/collie/tree/dev/collie
try:
except:
pass
def setup_lomo(model_name_or_path):
    """Prepare a model config for LOMO training under DeepSpeed ZeRO-3.

    :param model_name_or_path: HF hub identifier or local checkpoint path.
    :return: ``transformers.AutoConfig`` with ``gradient_checkpointing=True``.
    """
    # Train in half precision by default.
    torch.set_default_dtype(torch.float16)
    # ds_config.json is expected to live next to this module.
    ds_config = __file__.replace("lomo_base.py", "ds_config.json")
    # Registers the DeepSpeed config with transformers so from_pretrained can
    # shard weights ZeRO-3 style. NOTE(review): transformers keeps only a weak
    # reference to this object; binding it to a local that dies on return may
    # defeat the integration — confirm against the HF DeepSpeed docs.
    dschf = HfDeepSpeedConfig(ds_config)
    config = AutoConfig.from_pretrained(model_name_or_path)
    config.gradient_checkpointing = True
    return config
def create_lomo_lr_scheduler(
learning_rate=0.03,
n_steps=1000,
num_train_epochs=10,
warmup=0.1,
lr_scheduler_type="linear",
): | return LearningRateScheduler( | 2 | 2023-11-11 03:29:00+00:00 | 12k |
quantuminterface/qiclib | src/qiclib/code/qi_sequencer.py | [
{
"identifier": "QiCellProperty",
"path": "src/qiclib/code/qi_var_definitions.py",
"snippet": "class QiCellProperty(QiExpression):\n \"\"\"When describing experiments, properties of cells might not yet be defined. Instead a QiCellProperty object will be generated.\n This object can be used as leng... | from enum import Enum
from typing import List, Union, Any, Dict, Optional, Tuple
from qiclib.code.qi_jobs import (
ForRange,
If,
Parallel,
cQiRecording,
cQiSync,
)
from .qi_var_definitions import (
QiCellProperty,
QiVariableSet,
_QiCalcBase,
_QiVariableBase,
QiExpression,
_QiConstValue,
QiCondition,
QiOpCond,
QiOp,
)
from .qi_seq_instructions import (
SeqLoad,
SeqStore,
SeqAwaitQubitState,
SequencerInstruction,
SeqRegImmediateInst,
SeqRegRegInst,
SeqLoadUpperImm,
SeqJump,
SeqBranch,
SeqWaitImm,
SeqWaitRegister,
SeqTrigger,
SeqEnd,
SeqTriggerWaitRegister,
)
from .qi_util import _get_for_range_iterations
from .qi_var_definitions import _QiVariableBase
from .qi_var_definitions import _QiCalcBase
from .qi_var_definitions import _QiVariableBase
from .qi_jobs import _cQiPlay_base
import warnings
import qiclib.packages.utility as util | 10,761 | return util.conv_cycles_to_time(self.RECORDING_MODULE_DELAY_CYCLES)
@property
def readout_active(self):
return self._trigger_mods.is_readout_active
@property
def manipulation_active(self):
return self._trigger_mods.is_manipulation_active
def add_variable(self, var):
"""Adds variable to sequencer, reserving a register for it"""
reg = self.request_register()
self._var_reg_dict[var.id] = reg
# Named variables can be initialized externally
if var.name is not None:
reg.valid = False
reg.value = 0
def release_variable(self, var):
self.release_register(self.get_var_register(var))
def get_var_register(self, var) -> _Register:
"""Returns _Register of QiVariable var"""
reg = self._var_reg_dict.get(var.id)
if reg is None:
raise RuntimeError(
f"Variable not defined for Sequencer, var.id:{var.id}, {self._var_reg_dict}"
)
return reg
def get_var_value(self, var) -> Union[int, float, None]:
return self.get_var_register(var).get_value()
def request_register(self) -> _Register:
"""Returns register from stack, raises exception, if no registers are on stack anymore"""
try:
return self._register_stack.pop()
except IndexError as e:
print(
"Not enough registers available, sequencer "
+ str(self)
+ " error: "
+ str(e)
)
raise
def get_cycles_from_length(self, length) -> Union[_Register, int]:
"""If length is QiVariable, return _Register, else return numbers of cycles ceiled"""
if isinstance(length, _QiVariableBase):
return self.get_var_register(length)
elif isinstance(length, int):
length = float(length)
return util.conv_time_to_cycles(length, "ceil")
def release_register(self, reg: _Register):
"""Returns register to stack; Raises exception when register is already in stack, or addressing is faulty.
Releasing register 0 does nothing"""
if reg in self._register_stack:
raise IndexError("Release Register: Already released register")
if (reg.adr > Sequencer.AVAILABLE_REGISTERS) or (reg.adr < 0):
raise IndexError("Release Register: Address out of Range")
if reg == self.reg0:
return
reg.valid = True # if register was invalidated and is released again, return it to initial valid state
self._register_stack.append(reg)
def add_instruction_to_list(
self,
instruction: SequencerInstruction,
length_in_cycles: int = 1,
length_valid=True,
):
"""Adds instruction to list. If pulses are still running, adds choke instruction before adding the current command to the list"""
if self._trigger_mods.is_pulse_active:
self.trigger_choke_pulse()
if length_in_cycles == 0:
length_in_cycles = 1 # length is always at least 1 per instruction
self.instruction_list.append(instruction)
self._prog_cycles.add(
length_in_cycles, length_valid
) # Will be deprecated when external sync is possible.
def get_prog_size(self) -> int:
return len(self.instruction_list)
def add_mov_command(self, dst_reg: _Register, src_reg: _Register):
"""Copies value of src_reg to dst_reg."""
self.add_calculation(src_reg, QiOp.PLUS, 0, dst_reg)
def get_upper_immediate_value(self, value: SequencerInstruction.imm_type):
"""If bit 11 of lower value is 1, ADDI command sign extends the value. To account for that, sign extend lower 12 bits
and subtract from upper 20 bits."""
sign_extended_lower = (
value | 0xFFFFF000 if value & 0x00000800 != 0 else value & 0x00000FFF
)
return (value - sign_extended_lower) & 0xFFFFF000
def immediate_to_register(
self, val: SequencerInstruction.imm_type, dst_reg: Optional[_Register] = None
) -> _Register:
"""Loads immediate to dst_reg.
If dst_reg is not defined a new register is used to save val to.
If value == 0 and no register is specified, reg0 is returned, which always contains 0.
dst_reg.value is updated to reflect changes."""
if val == 0 and dst_reg is None:
return self.reg0
elif dst_reg is None:
dst_reg = self.request_register()
if isinstance(val, float):
raise NotImplementedError("float not implemented yet")
if SequencerInstruction.is_value_in_lower_immediate(val):
self.add_instruction_to_list(
| # Copyright © 2017-2023 Quantum Interface (quantuminterface@ipe.kit.edu)
# Richard Gebauer, IPE, Karlsruhe Institute of Technology
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
The lower level logic of the code generation.
This module tracks the sequencer state at the current point (e.g. register values, variable to register mapping, etc.),
provides helper functions to generate code for expressions and more.
"""
class _Register:
    """Compile-time model of one sequencer register.

    Mirrors the value a hardware register would hold so that program length
    can be derived statically. ``valid`` is cleared whenever the content can
    no longer be predicted (use of If/Else).
    TODO load commands invalidate value"""

    def __init__(self, address) -> None:
        self.adr = address   # hardware register address
        self.value = None    # tracked content; None until first written
        self.valid = True    # False -> value can no longer be trusted

    # --- ALU mirror operations; all share the (val1, val2) signature so they
    # --- can be dispatched uniformly through eval_operation. ---------------
    def addition(self, val1, val2):
        self.value = val1 + val2

    def subtraction(self, val1, val2):
        self.value = val1 - val2

    def multiplication(self, val1, val2):
        self.value = val1 * val2

    def and_values(self, val1, val2):
        self.value = val1 & val2

    def or_values(self, val1, val2):
        self.value = val1 | val2

    def xor_values(self, val1, val2):
        self.value = val1 ^ val2

    def lshift(self, val1, val2):
        self.value = val1 << val2

    def rshift(self, val1, val2):
        self.value = val1 >> val2

    def inversion(self, val1, val2):
        # val2 is ignored; it exists only to keep the dispatch signature uniform.
        self.value = ~val1

    # Maps each QiOp onto the (unbound) method implementing it.
    eval_operation = {
        QiOp.PLUS: addition,
        QiOp.MINUS: subtraction,
        QiOp.MULT: multiplication,
        QiOp.AND: and_values,
        QiOp.OR: or_values,
        QiOp.XOR: xor_values,
        QiOp.LSH: lshift,
        QiOp.RSH: rshift,
        QiOp.NOT: inversion,
    }

    def get_value(self):
        """Return the tracked value, or None when it cannot be trusted."""
        return self.value if self.valid else None

    def update_register_value(self, val1, op, val2):
        """Apply *op* to the operands and store the result.

        Register operands are unwrapped to their tracked values first; an
        invalid operand makes this register invalid too, so implicit waits
        derived from it are disabled downstream.
        """
        if self.adr == 0:
            self.value = 0  # reg0 is hardwired to zero
            return

        def _resolve(operand):
            # Unwrap a _Register operand, propagating invalidity onto self.
            if not isinstance(operand, _Register):
                return operand
            if operand.value is None:
                raise RuntimeError(
                    f"Variable at Register {operand.adr} has not been properly initialised"
                )
            if not operand.valid:
                self.valid = False
            return operand.value

        self.eval_operation[op](self, _resolve(val1), _resolve(val2))
class ForRangeEntry:
    """Book-keeping record for one ForRange loop of the generated program.

    After compilation these entries translate live register values and the
    program counter into a completed-loop count (used for the progress bar).
    Nested loops are linked through :attr:`contained_entries`.
    """

    def __init__(self, reg_addr, start_val, end_val, step_val) -> None:
        # Address of the register holding the loop variable.
        self.reg_addr = reg_addr
        self.start = start_val
        self.end = end_val
        self.step = step_val
        # Program address of the loop end; presumably patched in later — TODO confirm.
        self.end_addr = 0
        # Iterations of this loop alone vs. including nested loops.
        self.iterations = 0
        self.aggregate_iterations = 0
        self.contained_entries: List[ForRangeEntry] = []

    def _calc_aggregate(self):
        """Calculates the number of loops contained inside, considering nested entries, for later use at progress bar."""
        self.iterations = _get_for_range_iterations(self.start, self.end, self.step)
        if len(self.contained_entries) == 0 or self.iterations is None:
            if self.iterations is None:
                # Variable start/end: iteration count unknown at compile time.
                self.aggregate_iterations = 0
                warnings.warn(
                    "A loop with variable start/end could not be counted towards total loop count. Progress bar might be inaccurate."
                )
            else:
                self.aggregate_iterations = self.iterations
        else:
            nested = 0
            for entry in self.contained_entries:
                if entry.aggregate_iterations == 0:
                    warnings.warn(
                        "A loop with variable start/end could not be counted towards total loop count. Progress bar might be inaccurate."
                    )
                    continue
                nested += entry.aggregate_iterations
            self.aggregate_iterations = self.iterations * (nested if nested != 0 else 1)

    def get_iteration(self, value: int) -> int:
        """Returns the current iteration depending on the parameter value"""
        if isinstance(self.start, _QiVariableBase):
            # Variable start: the current iteration cannot be derived.
            return 0
        # step may be a project constant wrapper exposing .value — TODO confirm.
        _step = self.step if isinstance(self.step, int) else self.step.value
        iterations = 0
        for _ in range(self.start, value, _step):
            iterations += 1
        return iterations

    @staticmethod
    def get_total_loops(entry_list):
        """Sum of aggregate iterations over *entry_list*; never less than 1."""
        if len(entry_list) == 0:
            return 1
        iterations = 0
        for entry in entry_list:
            iterations += entry.aggregate_iterations
        return iterations if iterations > 0 else 1

    @staticmethod
    def calculate_current_loop(entry_list, register_list, prog_counter):
        """Derive how many loop passes have completed from the live register
        values (``register_list``) and the current program counter."""
        loop = 0
        for entry in entry_list:
            if entry.end_addr < prog_counter:
                # Loop lies entirely before the program counter: fully done.
                loop += entry.aggregate_iterations
            else:
                iteration = entry.get_iteration(register_list[entry.reg_addr])
                if len(entry.contained_entries) == 0:
                    loop += iteration
                else:
                    loop += iteration * ForRangeEntry.get_total_loops(
                        entry.contained_entries
                    ) + ForRangeEntry.calculate_current_loop(
                        entry.contained_entries, register_list, prog_counter
                    )
                # First not-yet-finished loop determines the result.
                return loop
        return loop
class Sequencer:
    """Per-cell code-generation state: registers, instruction list, trigger
    modules and ForRange book-keeping."""

    # Register 0 is hardwired to zero, leaving 31 general-purpose registers.
    AVAILABLE_REGISTERS = 31
    # Instruction latencies in cycles — presumably hardware pipeline figures; TODO confirm.
    MULTIPLICATION_LENGTH = 6
    JUMP_EXECUTION_CYCLES = 2
    LOAD_STORE_LENGTH = 8
    # Additional delay to prevent ignored trigger for consecutive readouts
    RECORDING_MODULE_DELAY_CYCLES = 1
    CHOKE_PULSE_INDEX = 14

    def __init__(self, cell_index=None):
        self.alu = _ALU(self)
        # reset() creates all mutable state (register stack, instruction list, ...).
        self.reset()
        self.cell_index = cell_index
def reset(self):
self._register_stack: List[_Register] = []
self.instruction_list: List[SequencerInstruction] = []
self._prog_cycles = _ProgramCycles()
self._var_reg_dict: Dict[Any, _Register] = {}
self._trigger_mods = _TriggerModules()
self._for_range_list = []
self._for_range_stack: List[ForRangeEntry] = []
# register 0 always contains 0, so is not in stack
self.reg0 = _Register(0)
for x in range(Sequencer.AVAILABLE_REGISTERS, 0, -1):
self._register_stack.append(_Register(x))
def print_assembler(self):
pc = 0
for instruction in self.instruction_list:
print(str(pc) + "# ", end="")
print(instruction)
pc += 1
    @property
    def prog_cycles(self):
        """Program length is used for implicit synchs with Wait-Commands. If a program contains variable If/Else or loads to wait registers
        prog_length can not be determined. Invalid prog_cycles are some value less than 0.
        """
        if self._prog_cycles.valid:
            return self._prog_cycles.cycles
        return _ProgramCycles.INVALID

    @prog_cycles.setter
    def prog_cycles(self, x):
        """Set externally when ForRange is used."""
        self._prog_cycles.cycles = x

    @property
    def recording_delay(self):
        # Extra settle time (seconds) derived from the fixed cycle constant.
        return util.conv_cycles_to_time(self.RECORDING_MODULE_DELAY_CYCLES)

    @property
    def readout_active(self):
        # Delegated to the trigger-module tracker — see _TriggerModules.
        return self._trigger_mods.is_readout_active

    @property
    def manipulation_active(self):
        # Delegated to the trigger-module tracker — see _TriggerModules.
        return self._trigger_mods.is_manipulation_active
def add_variable(self, var):
"""Adds variable to sequencer, reserving a register for it"""
reg = self.request_register()
self._var_reg_dict[var.id] = reg
# Named variables can be initialized externally
if var.name is not None:
reg.valid = False
reg.value = 0
    def release_variable(self, var):
        """Return the register backing *var* to the free pool."""
        self.release_register(self.get_var_register(var))

    def get_var_register(self, var) -> _Register:
        """Returns _Register of QiVariable var"""
        reg = self._var_reg_dict.get(var.id)
        if reg is None:
            raise RuntimeError(
                f"Variable not defined for Sequencer, var.id:{var.id}, {self._var_reg_dict}"
            )
        return reg

    def get_var_value(self, var) -> Union[int, float, None]:
        """Tracked value of *var*'s register, or None if it cannot be trusted."""
        return self.get_var_register(var).get_value()
def request_register(self) -> _Register:
"""Returns register from stack, raises exception, if no registers are on stack anymore"""
try:
return self._register_stack.pop()
except IndexError as e:
print(
"Not enough registers available, sequencer "
+ str(self)
+ " error: "
+ str(e)
)
raise
    def get_cycles_from_length(self, length) -> Union[_Register, int]:
        """If length is QiVariable, return _Register, else return numbers of cycles ceiled"""
        if isinstance(length, _QiVariableBase):
            return self.get_var_register(length)
        elif isinstance(length, int):
            # conv_time_to_cycles expects a time value; normalise ints to float.
            length = float(length)
        return util.conv_time_to_cycles(length, "ceil")

    def release_register(self, reg: _Register):
        """Returns register to stack; Raises exception when register is already in stack, or addressing is faulty.
        Releasing register 0 does nothing"""
        if reg in self._register_stack:
            raise IndexError("Release Register: Already released register")
        if (reg.adr > Sequencer.AVAILABLE_REGISTERS) or (reg.adr < 0):
            raise IndexError("Release Register: Address out of Range")
        if reg == self.reg0:
            return
        reg.valid = True  # if register was invalidated and is released again, return it to initial valid state
        self._register_stack.append(reg)
    def add_instruction_to_list(
        self,
        instruction: SequencerInstruction,
        length_in_cycles: int = 1,
        length_valid=True,
    ):
        """Adds instruction to list. If pulses are still running, adds choke instruction before adding the current command to the list"""
        if self._trigger_mods.is_pulse_active:
            self.trigger_choke_pulse()
        if length_in_cycles == 0:
            length_in_cycles = 1  # length is always at least 1 per instruction
        self.instruction_list.append(instruction)
        self._prog_cycles.add(
            length_in_cycles, length_valid
        )  # Will be deprecated when external sync is possible.

    def get_prog_size(self) -> int:
        """Number of instructions emitted so far."""
        return len(self.instruction_list)

    def add_mov_command(self, dst_reg: _Register, src_reg: _Register):
        """Copies value of src_reg to dst_reg."""
        # Implemented as src_reg + 0 -> dst_reg (no dedicated MOV instruction).
        self.add_calculation(src_reg, QiOp.PLUS, 0, dst_reg)

    def get_upper_immediate_value(self, value: SequencerInstruction.imm_type):
        """If bit 11 of lower value is 1, ADDI command sign extends the value. To account for that, sign extend lower 12 bits
        and subtract from upper 20 bits."""
        # RISC-V style LUI/ADDI split: compensate for ADDI's 12-bit sign
        # extension, then keep only the upper 20 bits.
        sign_extended_lower = (
            value | 0xFFFFF000 if value & 0x00000800 != 0 else value & 0x00000FFF
        )
        return (value - sign_extended_lower) & 0xFFFFF000
def immediate_to_register(
self, val: SequencerInstruction.imm_type, dst_reg: Optional[_Register] = None
) -> _Register:
"""Loads immediate to dst_reg.
If dst_reg is not defined a new register is used to save val to.
If value == 0 and no register is specified, reg0 is returned, which always contains 0.
dst_reg.value is updated to reflect changes."""
if val == 0 and dst_reg is None:
return self.reg0
elif dst_reg is None:
dst_reg = self.request_register()
if isinstance(val, float):
raise NotImplementedError("float not implemented yet")
if SequencerInstruction.is_value_in_lower_immediate(val):
self.add_instruction_to_list( | SeqRegImmediateInst(QiOp.PLUS, dst_reg.adr, 0, val) | 13 | 2023-11-10 10:26:10+00:00 | 12k |
jpcadena/fastapi-boilerplate | app/api/api_v1/router/user.py | [
{
"identifier": "get_redis_dep",
"path": "app/api/deps.py",
"snippet": "async def get_redis_dep(\n redis_dependency: Annotated[RedisDependency, Depends()]\n) -> AsyncGenerator[Redis, None]: # type: ignore\n \"\"\"\n Lazy generation of Redis dependency\n :param redis_dependency: The dependen... | import logging
from typing import Annotated, Any, Optional
from uuid import uuid4
from fastapi import (
APIRouter,
BackgroundTasks,
Body,
Depends,
HTTPException,
Response,
status,
)
from fastapi.params import Path, Query
from pydantic import UUID4, NonNegativeInt, PositiveInt
from redis.asyncio import Redis
from sqlalchemy.exc import SQLAlchemyError
from app.api.deps import get_redis_dep
from app.api.oauth2_validation import get_current_user
from app.config.config import (
get_auth_settings,
get_init_settings,
get_settings,
init_setting,
)
from app.config.db.auth_settings import AuthSettings
from app.config.init_settings import InitSettings
from app.config.settings import Settings
from app.exceptions.exceptions import NotFoundException, ServiceException
from app.schemas.external.user import (
UserCreate,
UserCreateResponse,
UserResponse,
UsersResponse,
UserUpdate,
UserUpdateResponse,
)
from app.schemas.infrastructure.user import UserAuth
from app.services.infrastructure.cached_user import CachedUserService
from app.services.infrastructure.user import UserService, get_user_service
from app.tasks.email_tasks.email_tasks import (
send_new_account_email,
send_welcome_email,
) | 10,364 | """
User API Router
This module provides CRUD (Create, Retrieve, Update, Delete) operations
for users.
"""
logger: logging.Logger = logging.getLogger(__name__)
router: APIRouter = APIRouter(prefix="/user", tags=["user"])
@router.get("", response_model=UsersResponse)
async def get_users(
current_user: Annotated[UserAuth, Depends(get_current_user)],
| """
User API Router
This module provides CRUD (Create, Retrieve, Update, Delete) operations
for users.
"""
logger: logging.Logger = logging.getLogger(__name__)
router: APIRouter = APIRouter(prefix="/user", tags=["user"])
@router.get("", response_model=UsersResponse)
async def get_users(
current_user: Annotated[UserAuth, Depends(get_current_user)], | user_service: Annotated[UserService, Depends(get_user_service)], | 16 | 2023-11-17 00:32:32+00:00 | 12k |
dataaug/open-interpreter-free | interpreter/core/core.py | [
{
"identifier": "cli",
"path": "interpreter/cli/cli.py",
"snippet": "def cli(interpreter):\n parser = argparse.ArgumentParser(description=\"Open Interpreter\")\n\n # Add arguments\n for arg in arguments:\n if arg[\"type\"] == bool:\n parser.add_argument(\n f'-{a... | import json
import os
from datetime import datetime
from ..cli.cli import cli
from ..llm.setup_llm import setup_llm
from ..terminal_interface.terminal_interface import terminal_interface
from ..terminal_interface.validate_llm_settings import validate_llm_settings
from ..utils.check_for_update import check_for_update
from ..utils.display_markdown_message import display_markdown_message
from ..utils.get_config import get_config, user_config_path
from ..utils.local_storage_path import get_storage_path
from .generate_system_message import generate_system_message
from .respond import respond | 7,228 | """
This file defines the Interpreter class.
It's the main file. `import interpreter` will import an instance of this class.
"""
class Interpreter:
def cli(self):
cli(self)
def __init__(self):
# State
self.messages = []
self._code_interpreters = {}
self.config_file = user_config_path
# Settings
self.local = False
self.auto_run = False
self.debug_mode = False
self.max_output = 2000
self.safe_mode = "off"
self.disable_procedures = False
# Conversation history
self.conversation_history = True
self.conversation_filename = None
self.conversation_history_path = get_storage_path("conversations")
# LLM settings
self.model = ""
self.temperature = None
self.system_message = ""
self.context_window = None
self.max_tokens = None
self.api_base = None
self.api_key = None
self.max_budget = None
self._llm = None
self.function_calling_llm = None
self.vision = False # LLM supports vision
# Load config defaults
self.extend_config(self.config_file)
# Check for update
try:
if not self.local:
# This should actually be pushed into the utility
if check_for_update():
display_markdown_message(
"> **A new version of Open Interpreter is available.**\n>Please run: `pip install --upgrade open-interpreter`\n\n---"
)
except:
# Doesn't matter
pass
def extend_config(self, config_path):
if self.debug_mode:
print(f"Extending configuration from `{config_path}`")
config = get_config(config_path)
self.__dict__.update(config)
def chat(self, message=None, display=True, stream=False):
if stream:
return self._streaming_chat(message=message, display=display)
# If stream=False, *pull* from the stream.
for _ in self._streaming_chat(message=message, display=display):
pass
return self.messages
def _streaming_chat(self, message=None, display=True):
# If we have a display,
# we can validate our LLM settings w/ the user first
gpt4free = True
if display and not gpt4free:
validate_llm_settings(self)
# Setup the LLM
if not self._llm:
| """
This file defines the Interpreter class.
It's the main file. `import interpreter` will import an instance of this class.
"""
class Interpreter:
    def cli(self):
        """Hand this instance to the command-line front end (parses argv into
        this instance's settings and starts the interactive session)."""
        cli(self)
def __init__(self):
# State
self.messages = []
self._code_interpreters = {}
self.config_file = user_config_path
# Settings
self.local = False
self.auto_run = False
self.debug_mode = False
self.max_output = 2000
self.safe_mode = "off"
self.disable_procedures = False
# Conversation history
self.conversation_history = True
self.conversation_filename = None
self.conversation_history_path = get_storage_path("conversations")
# LLM settings
self.model = ""
self.temperature = None
self.system_message = ""
self.context_window = None
self.max_tokens = None
self.api_base = None
self.api_key = None
self.max_budget = None
self._llm = None
self.function_calling_llm = None
self.vision = False # LLM supports vision
# Load config defaults
self.extend_config(self.config_file)
# Check for update
try:
if not self.local:
# This should actually be pushed into the utility
if check_for_update():
display_markdown_message(
"> **A new version of Open Interpreter is available.**\n>Please run: `pip install --upgrade open-interpreter`\n\n---"
)
except:
# Doesn't matter
pass
def extend_config(self, config_path):
if self.debug_mode:
print(f"Extending configuration from `{config_path}`")
config = get_config(config_path)
self.__dict__.update(config)
def chat(self, message=None, display=True, stream=False):
if stream:
return self._streaming_chat(message=message, display=display)
# If stream=False, *pull* from the stream.
for _ in self._streaming_chat(message=message, display=display):
pass
return self.messages
def _streaming_chat(self, message=None, display=True):
# If we have a display,
# we can validate our LLM settings w/ the user first
gpt4free = True
if display and not gpt4free:
validate_llm_settings(self)
# Setup the LLM
if not self._llm: | self._llm = setup_llm(self) | 1 | 2023-11-16 03:10:42+00:00 | 12k |
3dp-accelerometer/octoprint-accelerometer | octoprint_accelerometer/plugin.py | [
{
"identifier": "DataPostProcessRunner",
"path": "octoprint_accelerometer/data_post_process.py",
"snippet": "class DataPostProcessRunner:\n \"\"\"\n Runner for traversing stream files and post-processing (FFT) if necessary.\n \"\"\"\n def __init__(self,\n logger: Logger,\n ... | import os
import flask
import octoprint.plugin
from typing import Any, Dict, List, Literal, Optional, Tuple
from octoprint.server.util.tornado import LargeResponseHandler, path_validation_factory
from octoprint.util import is_hidden_path
from py3dpaxxel.cli.args import convert_axis_from_str
from py3dpaxxel.controller.api import Py3dpAxxel
from py3dpaxxel.sampling_tasks.series_argument_generator import RunArgsGenerator
from py3dpaxxel.storage.file_filter import FileSelector, File
from py3dpaxxel.storage.filename import timestamp_from_args
from py3dpaxxel.storage.filename_meta import FilenameMetaStream, FilenameMetaFft
from octoprint_accelerometer.data_post_process import DataPostProcessRunner
from octoprint_accelerometer.event_types import DataProcessingEventType, RecordingEventType
from octoprint_accelerometer.record_step_series import RecordStepSeriesRunner
from octoprint_accelerometer.transfer_types import RunMeta, SequenceMeta, StreamMeta, DataSets, FftMeta, Timestamp | 7,431 | self.auto_home: bool = False
self.start_frequency_hz: int = 0
self.stop_frequency_hz: int = 0
self.step_frequency_hz: int = 0
self.start_zeta_em2: int = 0
self.stop_zeta_em2: int = 0
self.step_zeta_em2: int = 0
self.sensor_output_data_rate_hz: int = 0
self.data_remove_before_run: bool = False
self.do_sample_x: bool = False
self.do_sample_y: bool = False
self.do_sample_z: bool = False
self.recording_timespan_s: float = 0
self.sequence_separation_s: float = 0
self.step_separation_s: float = 0
self.do_dry_run: bool = False
# other parameters shared with UI
self.devices_seen: List[str] = []
self.device: str = ""
self.controller_fifo_overrun_error: bool = False
self.controller_response_error: bool = False
# following parameters are computed from above parameters
self.axis_x_sampling_start: Point3D = Point3D(0, 0, 0)
self.axis_y_sampling_start: Point3D = Point3D(0, 0, 0)
self.axis_z_sampling_start: Point3D = Point3D(0, 0, 0)
# recording runner: once constructed before invocation all properties shall be updated
self.data_recording_runner: Optional[RecordStepSeriesRunner] = None
self.data_processing_runner: Optional[DataPostProcessRunner] = None
@staticmethod
def _get_devices() -> Tuple[str, List[str]]:
"""
:return: tuple of primary device (if any) and list of all devices
"""
seen_devices: List[str] = [k for k in Py3dpAxxel.get_devices_dict().keys()]
primary: str = seen_devices[0] if len(seen_devices) > 0 else None
return primary, seen_devices
def _update_seen_devices(self):
primary, seen_devices = self._get_devices()
self._logger.debug(f"seen devices: primary={primary}, seen={seen_devices}")
self.devices_seen = seen_devices
self.device = primary if primary is not None else ""
@octoprint.plugin.BlueprintPlugin.route("/set_values", methods=["POST"])
def on_api_set_values(self):
data = flask.request.json
self._update_members_from_api(data)
response = flask.jsonify(message="OK")
response.status_code = 202
return response
@octoprint.plugin.BlueprintPlugin.route("/start_recording", methods=["POST"])
def on_api_start_recording(self):
self._start_recording()
response = flask.jsonify(message="OK")
response.status_code = 202
return response
@octoprint.plugin.BlueprintPlugin.route("/abort_recording", methods=["POST"])
def on_api_abort_recording(self):
self._abort_recording()
response = flask.jsonify(message="OK")
response.status_code = 202
return response
@octoprint.plugin.BlueprintPlugin.route("/start_data_processing", methods=["POST"])
def on_api_start_data_processing(self):
self._start_data_processing()
response = flask.jsonify(message="OK")
response.status_code = 202
return response
@octoprint.plugin.BlueprintPlugin.route("/get_estimate", methods=["GET"])
def on_api_get_estimate(self):
return flask.jsonify({f"estimate": self._estimate_duration()})
@octoprint.plugin.BlueprintPlugin.route("/get_parameters", methods=["GET"])
def on_api_get_parameters(self):
return flask.jsonify({f"parameters": self._get_parameter_dict(flask.request.args)})
@octoprint.plugin.BlueprintPlugin.route("/get_files_listing", methods=["GET"])
def on_api_get_files_listing(self):
fs = FileSelector(os.path.join(self.get_plugin_data_folder(), ".*"))
files_details = fs.filter()
return flask.jsonify({f"files": files_details})
@octoprint.plugin.BlueprintPlugin.route("/get_stream_files_listing", methods=["GET"])
def on_api_get_stream_files_listing(self):
fs = FileSelector(os.path.join(self.get_plugin_data_folder(), f"{self.OUTPUT_STREAM_FILE_NAME_PREFIX}-.*\\.tsv$"))
files = fs.filter()
files_details = [StreamMeta(f, FilenameMetaStream().from_filename(f.filename_ext)) for f in files]
return flask.jsonify({f"stream_files": files_details})
@octoprint.plugin.BlueprintPlugin.route("/get_fft_files_listing", methods=["GET"])
def on_api_get_fft_files_listing(self):
fs = FileSelector(os.path.join(self.get_plugin_data_folder(), f"{self.OUTPUT_FFT_FILE_NAME_PREFIX}-.*\\.tsv$"))
files = fs.filter()
files_details = [FftMeta(f, FilenameMetaFft().from_filename(f.filename_ext)) for f in files]
return flask.jsonify({f"fft_files": files_details})
@octoprint.plugin.BlueprintPlugin.route("/get_data_listing", methods=["GET"])
def on_api_get_data_listing(self):
fs_stream = FileSelector(os.path.join(self.get_plugin_data_folder(), f"{self.OUTPUT_STREAM_FILE_NAME_PREFIX}-.*\\.tsv$"))
fs_fft = FileSelector(os.path.join(self.get_plugin_data_folder(), f"{self.OUTPUT_FFT_FILE_NAME_PREFIX}-.*\\.tsv$"))
files_meta_data_stream: List[Tuple[File, FilenameMetaStream]] = [(f, FilenameMetaStream().from_filename(f.filename_ext)) for f in fs_stream.filter()]
files_meta_data_fft: List[Tuple[File, FilenameMetaFft]] = [(f, FilenameMetaFft().from_filename(f.filename_ext)) for f in fs_fft.filter()]
data_sets: DataSets = DataSets()
# append all streams
for file_meta, filename_meta in files_meta_data_stream:
run_hash, sequence_nr, stream_hash = filename_meta.run_hash, filename_meta.sequence_nr, filename_meta.stream_hash
if run_hash not in data_sets.runs.keys():
data_sets.runs[run_hash] = RunMeta()
if sequence_nr not in data_sets.runs[run_hash].sequences.keys():
|
class Point3D:
    """Simple integer 3D coordinate (used for sampling start positions)."""

    def __init__(self, x: int, y: int, z: int):
        self.x: int = x
        self.y: int = y
        self.z: int = z

    def __str__(self):
        return f"x={self.x} y={self.y} z={self.z}"

    def __repr__(self):
        # Unambiguous form for debugging/logging; str() output is unchanged.
        return f"{type(self).__name__}(x={self.x}, y={self.y}, z={self.z})"
class OctoprintAccelerometerPlugin(octoprint.plugin.StartupPlugin,
octoprint.plugin.SettingsPlugin,
octoprint.plugin.AssetPlugin,
octoprint.plugin.TemplatePlugin,
octoprint.plugin.BlueprintPlugin):
OUTPUT_STREAM_FILE_NAME_PREFIX: str = "axxel"
OUTPUT_FFT_FILE_NAME_PREFIX: str = "fft"
    # noinspection PyMissingConstructor
    def __init__(self):
        """Initialize all UI-/settings-shared members with neutral defaults.

        No base ``__init__`` is called (hence the noinspection marker);
        presumably OctoPrint's plugin loader injects the mixin helpers later
        — TODO confirm.
        """
        # following parameters are shared among settings and UI
        self.distance_x_mm: int = 0
        self.distance_y_mm: int = 0
        self.distance_z_mm: int = 0
        self.step_count: int = 0
        self.speed_x_mm_s: int = 0
        self.speed_y_mm_s: int = 0
        self.speed_z_mm_s: int = 0
        self.acceleration_x_mm_ss: int = 0
        self.acceleration_y_mm_ss: int = 0
        self.acceleration_z_mm_ss: int = 0
        self.anchor_point_coord_x_mm: int = 0
        self.anchor_point_coord_y_mm: int = 0
        self.anchor_point_coord_z_mm: int = 0
        self.sequence_count: int = 0
        self.go_start: bool = False
        self.return_start: bool = False
        self.auto_home: bool = False
        self.start_frequency_hz: int = 0
        self.stop_frequency_hz: int = 0
        self.step_frequency_hz: int = 0
        self.start_zeta_em2: int = 0
        self.stop_zeta_em2: int = 0
        self.step_zeta_em2: int = 0
        self.sensor_output_data_rate_hz: int = 0
        self.data_remove_before_run: bool = False
        self.do_sample_x: bool = False
        self.do_sample_y: bool = False
        self.do_sample_z: bool = False
        self.recording_timespan_s: float = 0
        self.sequence_separation_s: float = 0
        self.step_separation_s: float = 0
        self.do_dry_run: bool = False
        # other parameters shared with UI
        self.devices_seen: List[str] = []
        self.device: str = ""
        self.controller_fifo_overrun_error: bool = False
        self.controller_response_error: bool = False
        # following parameters are computed from above parameters
        self.axis_x_sampling_start: Point3D = Point3D(0, 0, 0)
        self.axis_y_sampling_start: Point3D = Point3D(0, 0, 0)
        self.axis_z_sampling_start: Point3D = Point3D(0, 0, 0)
        # recording runner: once constructed before invocation all properties shall be updated
        self.data_recording_runner: Optional[RecordStepSeriesRunner] = None
        self.data_processing_runner: Optional[DataPostProcessRunner] = None
@staticmethod
def _get_devices() -> Tuple[str, List[str]]:
"""
:return: tuple of primary device (if any) and list of all devices
"""
seen_devices: List[str] = [k for k in Py3dpAxxel.get_devices_dict().keys()]
primary: str = seen_devices[0] if len(seen_devices) > 0 else None
return primary, seen_devices
def _update_seen_devices(self):
primary, seen_devices = self._get_devices()
self._logger.debug(f"seen devices: primary={primary}, seen={seen_devices}")
self.devices_seen = seen_devices
self.device = primary if primary is not None else ""
@octoprint.plugin.BlueprintPlugin.route("/set_values", methods=["POST"])
def on_api_set_values(self):
data = flask.request.json
self._update_members_from_api(data)
response = flask.jsonify(message="OK")
response.status_code = 202
return response
@octoprint.plugin.BlueprintPlugin.route("/start_recording", methods=["POST"])
def on_api_start_recording(self):
self._start_recording()
response = flask.jsonify(message="OK")
response.status_code = 202
return response
@octoprint.plugin.BlueprintPlugin.route("/abort_recording", methods=["POST"])
def on_api_abort_recording(self):
self._abort_recording()
response = flask.jsonify(message="OK")
response.status_code = 202
return response
@octoprint.plugin.BlueprintPlugin.route("/start_data_processing", methods=["POST"])
def on_api_start_data_processing(self):
self._start_data_processing()
response = flask.jsonify(message="OK")
response.status_code = 202
return response
@octoprint.plugin.BlueprintPlugin.route("/get_estimate", methods=["GET"])
def on_api_get_estimate(self):
return flask.jsonify({f"estimate": self._estimate_duration()})
@octoprint.plugin.BlueprintPlugin.route("/get_parameters", methods=["GET"])
def on_api_get_parameters(self):
return flask.jsonify({f"parameters": self._get_parameter_dict(flask.request.args)})
@octoprint.plugin.BlueprintPlugin.route("/get_files_listing", methods=["GET"])
def on_api_get_files_listing(self):
fs = FileSelector(os.path.join(self.get_plugin_data_folder(), ".*"))
files_details = fs.filter()
return flask.jsonify({f"files": files_details})
@octoprint.plugin.BlueprintPlugin.route("/get_stream_files_listing", methods=["GET"])
def on_api_get_stream_files_listing(self):
fs = FileSelector(os.path.join(self.get_plugin_data_folder(), f"{self.OUTPUT_STREAM_FILE_NAME_PREFIX}-.*\\.tsv$"))
files = fs.filter()
files_details = [StreamMeta(f, FilenameMetaStream().from_filename(f.filename_ext)) for f in files]
return flask.jsonify({f"stream_files": files_details})
@octoprint.plugin.BlueprintPlugin.route("/get_fft_files_listing", methods=["GET"])
def on_api_get_fft_files_listing(self):
fs = FileSelector(os.path.join(self.get_plugin_data_folder(), f"{self.OUTPUT_FFT_FILE_NAME_PREFIX}-.*\\.tsv$"))
files = fs.filter()
files_details = [FftMeta(f, FilenameMetaFft().from_filename(f.filename_ext)) for f in files]
return flask.jsonify({f"fft_files": files_details})
@octoprint.plugin.BlueprintPlugin.route("/get_data_listing", methods=["GET"])
def on_api_get_data_listing(self):
fs_stream = FileSelector(os.path.join(self.get_plugin_data_folder(), f"{self.OUTPUT_STREAM_FILE_NAME_PREFIX}-.*\\.tsv$"))
fs_fft = FileSelector(os.path.join(self.get_plugin_data_folder(), f"{self.OUTPUT_FFT_FILE_NAME_PREFIX}-.*\\.tsv$"))
files_meta_data_stream: List[Tuple[File, FilenameMetaStream]] = [(f, FilenameMetaStream().from_filename(f.filename_ext)) for f in fs_stream.filter()]
files_meta_data_fft: List[Tuple[File, FilenameMetaFft]] = [(f, FilenameMetaFft().from_filename(f.filename_ext)) for f in fs_fft.filter()]
data_sets: DataSets = DataSets()
# append all streams
for file_meta, filename_meta in files_meta_data_stream:
run_hash, sequence_nr, stream_hash = filename_meta.run_hash, filename_meta.sequence_nr, filename_meta.stream_hash
if run_hash not in data_sets.runs.keys():
data_sets.runs[run_hash] = RunMeta()
if sequence_nr not in data_sets.runs[run_hash].sequences.keys(): | data_sets.runs[run_hash].sequences[sequence_nr] = SequenceMeta() | 5 | 2023-11-14 17:15:15+00:00 | 12k |
hmmbug/pythaidate | tests/test_pakdate.py | [
{
"identifier": "julianday",
"path": "pythaidate/julianday.py",
"snippet": "def to_julianday(year, month, day):\ndef from_julianday(jd):\ndef today(): # pragma: no cover\ndef date_to_julianday(d):\ndef julianday_to_date(obj):\n B = 0\n A = math.trunc(yearp / 100.)\n B = 2 - A + mat... | from datetime import date, timedelta
from pythaidate import PakDate, CsDate, julianday
from pythaidate.constants import PAK_JULIAN_DAY_OFFSET
import json
import unittest
import os
import pathlib
import random
import logging | 9,969 |
RUN_PERCENT = 10
if os.environ.get("RUN_PERCENT"):
RUN_PERCENT = int(os.environ.get("RUN_PERCENT"))
if RUN_PERCENT > 100:
RUN_PERCENT = 100
RUN_PERCENT /= 100
for datafile in ("pak.data", "pak.min.data"):
datafile = os.path.join(pathlib.Path(__file__).parent.resolve(), "data", datafile)
if os.path.exists(datafile):
break
else:
raise FileNotFoundError("Pak data file not found.")
random.seed()
def read_test_date(sample=1, minjd=None):
with open(datafile) as fh:
for ln in fh:
if random.random() > sample:
continue
i = ln.rstrip().split(" ")
y, m, d = i[4].split("-")
e = {
"pakcode": i[0],
"jd": int(i[1][3:]),
"hk": int(i[2][3:]),
"masak": int(i[3][6:]),
"year": int(y),
"month": int(m),
"day": int(d),
"iswanphra": i[5] == 't',
}
if minjd and e["jd"] < minjd:
continue
yield e
class Test_PakDate(unittest.TestCase):
def test_jd_pre_epoch(self):
with self.assertRaises(ValueError):
# pre-epoch jd
|
RUN_PERCENT = 10
if os.environ.get("RUN_PERCENT"):
RUN_PERCENT = int(os.environ.get("RUN_PERCENT"))
if RUN_PERCENT > 100:
RUN_PERCENT = 100
RUN_PERCENT /= 100
for datafile in ("pak.data", "pak.min.data"):
datafile = os.path.join(pathlib.Path(__file__).parent.resolve(), "data", datafile)
if os.path.exists(datafile):
break
else:
raise FileNotFoundError("Pak data file not found.")
random.seed()
def read_test_date(sample=1, minjd=None):
with open(datafile) as fh:
for ln in fh:
if random.random() > sample:
continue
i = ln.rstrip().split(" ")
y, m, d = i[4].split("-")
e = {
"pakcode": i[0],
"jd": int(i[1][3:]),
"hk": int(i[2][3:]),
"masak": int(i[3][6:]),
"year": int(y),
"month": int(m),
"day": int(d),
"iswanphra": i[5] == 't',
}
if minjd and e["jd"] < minjd:
continue
yield e
class Test_PakDate(unittest.TestCase):
def test_jd_pre_epoch(self):
with self.assertRaises(ValueError):
# pre-epoch jd | p = PakDate(jd=PAK_JULIAN_DAY_OFFSET - 5) | 3 | 2023-11-18 21:14:01+00:00 | 12k |
CmosWolf1/Code_implementation_for_paper_SKZC | diffusiondet/detector.py | [
{
"identifier": "SetCriterionDynamicK",
"path": "diffusiondet/loss.py",
"snippet": "class SetCriterionDynamicK(nn.Module):\n \"\"\" This class computes the loss for DiffusionDet.\n The process happens in two steps:\n 1) we compute hungarian assignment between ground truth boxes and the outp... | import math
import random
import torch
import torch.nn.functional as F
from typing import List
from collections import namedtuple
from torch import nn
from detectron2.layers import batched_nms
from detectron2.modeling import META_ARCH_REGISTRY, build_backbone, detector_postprocess
from detectron2.structures import Boxes, ImageList, Instances
from .loss import SetCriterionDynamicK, HungarianMatcherDynamicK
from .head import DynamicHead
from .util.box_ops import box_cxcywh_to_xyxy, box_xyxy_to_cxcywh
from .util.misc import nested_tensor_from_tensor_list | 9,358 | """
steps = timesteps + 1
x = torch.linspace(0, timesteps, steps, dtype=torch.float64)
alphas_cumprod = torch.cos(((x / timesteps) + s) / (1 + s) * math.pi * 0.5) ** 2
alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
return torch.clip(betas, 0, 0.999)
@META_ARCH_REGISTRY.register()
class DiffusionDet(nn.Module):
"""
Implement DiffusionDet
"""
def __init__(self, cfg):
super().__init__()
self.device = torch.device(cfg.MODEL.DEVICE)
self.in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
self.num_classes = cfg.MODEL.DiffusionDet.NUM_CLASSES
self.num_proposals = cfg.MODEL.DiffusionDet.NUM_PROPOSALS
self.hidden_dim = cfg.MODEL.DiffusionDet.HIDDEN_DIM
self.num_heads = cfg.MODEL.DiffusionDet.NUM_HEADS
# Build Backbone.
self.backbone = build_backbone(cfg)
self.size_divisibility = self.backbone.size_divisibility
# build diffusion
timesteps = 1000
sampling_timesteps = cfg.MODEL.DiffusionDet.SAMPLE_STEP
self.objective = 'pred_x0'
betas = cosine_beta_schedule(timesteps)
alphas = 1. - betas
alphas_cumprod = torch.cumprod(alphas, dim=0)
alphas_cumprod_prev = F.pad(alphas_cumprod[:-1], (1, 0), value=1.)
timesteps, = betas.shape
self.num_timesteps = int(timesteps)
self.sampling_timesteps = default(sampling_timesteps, timesteps)
assert self.sampling_timesteps <= timesteps
self.is_ddim_sampling = self.sampling_timesteps < timesteps
self.ddim_sampling_eta = 1.
self.self_condition = False
self.scale = cfg.MODEL.DiffusionDet.SNR_SCALE
self.box_renewal = True
self.use_ensemble = True
self.register_buffer('betas', betas)
self.register_buffer('alphas_cumprod', alphas_cumprod)
self.register_buffer('alphas_cumprod_prev', alphas_cumprod_prev)
# calculations for diffusion q(x_t | x_{t-1}) and others
self.register_buffer('sqrt_alphas_cumprod', torch.sqrt(alphas_cumprod))
self.register_buffer('sqrt_one_minus_alphas_cumprod', torch.sqrt(1. - alphas_cumprod))
self.register_buffer('log_one_minus_alphas_cumprod', torch.log(1. - alphas_cumprod))
self.register_buffer('sqrt_recip_alphas_cumprod', torch.sqrt(1. / alphas_cumprod))
self.register_buffer('sqrt_recipm1_alphas_cumprod', torch.sqrt(1. / alphas_cumprod - 1))
# calculations for posterior q(x_{t-1} | x_t, x_0)
posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod)
# above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
self.register_buffer('posterior_variance', posterior_variance)
# below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
self.register_buffer('posterior_log_variance_clipped', torch.log(posterior_variance.clamp(min=1e-20)))
self.register_buffer('posterior_mean_coef1', betas * torch.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))
self.register_buffer('posterior_mean_coef2',
(1. - alphas_cumprod_prev) * torch.sqrt(alphas) / (1. - alphas_cumprod))
# Build Dynamic Head.
self.head = DynamicHead(cfg=cfg, roi_input_shape=self.backbone.output_shape())
# Loss parameters:
class_weight = cfg.MODEL.DiffusionDet.CLASS_WEIGHT
giou_weight = cfg.MODEL.DiffusionDet.GIOU_WEIGHT
l1_weight = cfg.MODEL.DiffusionDet.L1_WEIGHT
no_object_weight = cfg.MODEL.DiffusionDet.NO_OBJECT_WEIGHT
self.deep_supervision = cfg.MODEL.DiffusionDet.DEEP_SUPERVISION
self.use_focal = cfg.MODEL.DiffusionDet.USE_FOCAL
self.use_fed_loss = cfg.MODEL.DiffusionDet.USE_FED_LOSS
self.use_nms = cfg.MODEL.DiffusionDet.USE_NMS
# Build Criterion.
matcher = HungarianMatcherDynamicK(
cfg=cfg, cost_class=class_weight, cost_bbox=l1_weight, cost_giou=giou_weight, use_focal=self.use_focal
)
weight_dict = {"loss_ce": class_weight, "loss_bbox": l1_weight, "loss_giou": giou_weight}
if self.deep_supervision:
aux_weight_dict = {}
for i in range(self.num_heads - 1):
aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
weight_dict.update(aux_weight_dict)
losses = ["labels", "boxes"]
self.criterion = SetCriterionDynamicK(
cfg=cfg, num_classes=self.num_classes, matcher=matcher, weight_dict=weight_dict, eos_coef=no_object_weight,
losses=losses, use_focal=self.use_focal,)
pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(3, 1, 1)
pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(3, 1, 1)
self.normalizer = lambda x: (x - pixel_mean) / pixel_std
self.to(self.device)
def predict_noise_from_start(self, x_t, t, x0):
return (
(extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - x0) /
extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
)
def model_predictions(self, backbone_feats, images_whwh, x, t, x_self_cond=None, clip_x_start=False):
x_boxes = torch.clamp(x, min=-1 * self.scale, max=self.scale)
x_boxes = ((x_boxes / self.scale) + 1) / 2
| # ========================================
# Modified by Shoufa Chen
# ========================================
# Modified by Peize Sun, Rufeng Zhang
# Contact: {sunpeize, cxrfzhang}@foxmail.com
#
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
__all__ = ["DiffusionDet"]
ModelPrediction = namedtuple('ModelPrediction', ['pred_noise', 'pred_x_start'])
def exists(x):
return x is not None
def default(val, d):
if exists(val):
return val
return d() if callable(d) else d
def extract(a, t, x_shape):
"""extract the appropriate t index for a batch of indices"""
batch_size = t.shape[0]
out = a.gather(-1, t)
return out.reshape(batch_size, *((1,) * (len(x_shape) - 1)))
def cosine_beta_schedule(timesteps, s=0.008):
"""
cosine schedule
as proposed in https://openreview.net/forum?id=-NEXDKk8gZ
"""
steps = timesteps + 1
x = torch.linspace(0, timesteps, steps, dtype=torch.float64)
alphas_cumprod = torch.cos(((x / timesteps) + s) / (1 + s) * math.pi * 0.5) ** 2
alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
return torch.clip(betas, 0, 0.999)
@META_ARCH_REGISTRY.register()
class DiffusionDet(nn.Module):
"""
Implement DiffusionDet
"""
def __init__(self, cfg):
super().__init__()
self.device = torch.device(cfg.MODEL.DEVICE)
self.in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
self.num_classes = cfg.MODEL.DiffusionDet.NUM_CLASSES
self.num_proposals = cfg.MODEL.DiffusionDet.NUM_PROPOSALS
self.hidden_dim = cfg.MODEL.DiffusionDet.HIDDEN_DIM
self.num_heads = cfg.MODEL.DiffusionDet.NUM_HEADS
# Build Backbone.
self.backbone = build_backbone(cfg)
self.size_divisibility = self.backbone.size_divisibility
# build diffusion
timesteps = 1000
sampling_timesteps = cfg.MODEL.DiffusionDet.SAMPLE_STEP
self.objective = 'pred_x0'
betas = cosine_beta_schedule(timesteps)
alphas = 1. - betas
alphas_cumprod = torch.cumprod(alphas, dim=0)
alphas_cumprod_prev = F.pad(alphas_cumprod[:-1], (1, 0), value=1.)
timesteps, = betas.shape
self.num_timesteps = int(timesteps)
self.sampling_timesteps = default(sampling_timesteps, timesteps)
assert self.sampling_timesteps <= timesteps
self.is_ddim_sampling = self.sampling_timesteps < timesteps
self.ddim_sampling_eta = 1.
self.self_condition = False
self.scale = cfg.MODEL.DiffusionDet.SNR_SCALE
self.box_renewal = True
self.use_ensemble = True
self.register_buffer('betas', betas)
self.register_buffer('alphas_cumprod', alphas_cumprod)
self.register_buffer('alphas_cumprod_prev', alphas_cumprod_prev)
# calculations for diffusion q(x_t | x_{t-1}) and others
self.register_buffer('sqrt_alphas_cumprod', torch.sqrt(alphas_cumprod))
self.register_buffer('sqrt_one_minus_alphas_cumprod', torch.sqrt(1. - alphas_cumprod))
self.register_buffer('log_one_minus_alphas_cumprod', torch.log(1. - alphas_cumprod))
self.register_buffer('sqrt_recip_alphas_cumprod', torch.sqrt(1. / alphas_cumprod))
self.register_buffer('sqrt_recipm1_alphas_cumprod', torch.sqrt(1. / alphas_cumprod - 1))
# calculations for posterior q(x_{t-1} | x_t, x_0)
posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod)
# above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
self.register_buffer('posterior_variance', posterior_variance)
# below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
self.register_buffer('posterior_log_variance_clipped', torch.log(posterior_variance.clamp(min=1e-20)))
self.register_buffer('posterior_mean_coef1', betas * torch.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))
self.register_buffer('posterior_mean_coef2',
(1. - alphas_cumprod_prev) * torch.sqrt(alphas) / (1. - alphas_cumprod))
# Build Dynamic Head.
self.head = DynamicHead(cfg=cfg, roi_input_shape=self.backbone.output_shape())
# Loss parameters:
class_weight = cfg.MODEL.DiffusionDet.CLASS_WEIGHT
giou_weight = cfg.MODEL.DiffusionDet.GIOU_WEIGHT
l1_weight = cfg.MODEL.DiffusionDet.L1_WEIGHT
no_object_weight = cfg.MODEL.DiffusionDet.NO_OBJECT_WEIGHT
self.deep_supervision = cfg.MODEL.DiffusionDet.DEEP_SUPERVISION
self.use_focal = cfg.MODEL.DiffusionDet.USE_FOCAL
self.use_fed_loss = cfg.MODEL.DiffusionDet.USE_FED_LOSS
self.use_nms = cfg.MODEL.DiffusionDet.USE_NMS
# Build Criterion.
matcher = HungarianMatcherDynamicK(
cfg=cfg, cost_class=class_weight, cost_bbox=l1_weight, cost_giou=giou_weight, use_focal=self.use_focal
)
weight_dict = {"loss_ce": class_weight, "loss_bbox": l1_weight, "loss_giou": giou_weight}
if self.deep_supervision:
aux_weight_dict = {}
for i in range(self.num_heads - 1):
aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
weight_dict.update(aux_weight_dict)
losses = ["labels", "boxes"]
self.criterion = SetCriterionDynamicK(
cfg=cfg, num_classes=self.num_classes, matcher=matcher, weight_dict=weight_dict, eos_coef=no_object_weight,
losses=losses, use_focal=self.use_focal,)
pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(3, 1, 1)
pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(3, 1, 1)
self.normalizer = lambda x: (x - pixel_mean) / pixel_std
self.to(self.device)
def predict_noise_from_start(self, x_t, t, x0):
return (
(extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - x0) /
extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
)
def model_predictions(self, backbone_feats, images_whwh, x, t, x_self_cond=None, clip_x_start=False):
x_boxes = torch.clamp(x, min=-1 * self.scale, max=self.scale)
x_boxes = ((x_boxes / self.scale) + 1) / 2 | x_boxes = box_cxcywh_to_xyxy(x_boxes) | 3 | 2023-11-17 02:37:37+00:00 | 12k |
fg320/DEASC | examples/05_3x3_farm_wso_SLSQP_visualization.py | [
{
"identifier": "WfModel",
"path": "deasc/wf_model.py",
"snippet": "class WfModel:\n \"\"\"\n Class for wind farm modelling (Interface setup but not limited to FLORIS\n framework).\n \"\"\"\n\n def __init__(self, input_file, path):\n \"\"\"\n Initialise wind farm object by p... | import numpy as np
import matplotlib.pyplot as plt
from deasc import WfModel
from deasc import WSOpt
from deasc.visualisation import (
wso_optimal_yaw_angles,
wso_optimal_flow_field,
wso_plot_details_iterations,
wso_plot_details_evaluations,
wso_explore_optimum_power_1var
) | 10,596 |
"""
This example shows the plotting methods for wake steering optimisation on a 3x3 wind farm
of NREL 5 MW turbines. The optimisation conditions are the same as for example 04. The
plotting methods include: optimal yaw angles plot, optimizer iteration details, objective
function evaluation details, and optimum exploration.
"""
# Input file definition
path = "./inputs/"
input_file = "gch.yaml"
# Initialise wind farm model
wf_model = WfModel(input_file, path)
# Change wind farm layout
n_row = 3
n_col = 3
spac_x = 7
spac_y = 5
wf_model.set_aligned_layout(n_row, n_col, spac_x, spac_y)
# Specify atmopheric conditions
ws = 8.0
wd = 270
ti = 0.05
shear = 0.0
# Wake steering optimisation inputs
yaw_initial = np.full(shape=(n_row*n_col), fill_value=0)
inflow = (yaw_initial, wd, ws, ti, shear)
variables = [1, 2, 3, 4, 5, 6]
var_bounds = (-25, 25)
var_initial = np.full(shape=(len(variables)), fill_value=0)
# Initialise optimisation object
wso_obj = WSOpt(wf_model=wf_model,
inflow=inflow,
variables=variables,
var_bounds=var_bounds,
var_initial=var_initial,
opt_method="SLSQP",
opt_options=None,
obj_function="Farm Power",
constraints=(None, None, None),
by_row=(False, None, None),
tuning_dynamic=False
)
# Optimise, extract and print optimal yaw angles
opt_yaw_angles_vars, opt_yaw_angles_all = wso_obj.optimize_yaw()
print('Optimal farm yaw angles:')
print(opt_yaw_angles_all)
# Get optimisation details and print number of iterations and evaluations
iter_details, eval_details = wso_obj.get_optimization_details()
print('Number of optimiser iterations: %i' % (len(iter_details[0])))
print('Number of objective function evaluations: %i' % (len(eval_details[0])))
# Plot optimisation details and results
wso_optimal_yaw_angles(wso_obj, radius=1.2)
wso_optimal_flow_field(wso_obj)
wso_plot_details_iterations(wso_obj)
wso_plot_details_evaluations(wso_obj)
|
"""
This example shows the plotting methods for wake steering optimisation on a 3x3 wind farm
of NREL 5 MW turbines. The optimisation conditions are the same as for example 04. The
plotting methods include: optimal yaw angles plot, optimizer iteration details, objective
function evaluation details, and optimum exploration.
"""
# Input file definition
path = "./inputs/"
input_file = "gch.yaml"
# Initialise wind farm model
wf_model = WfModel(input_file, path)
# Change wind farm layout
n_row = 3
n_col = 3
spac_x = 7
spac_y = 5
wf_model.set_aligned_layout(n_row, n_col, spac_x, spac_y)
# Specify atmopheric conditions
ws = 8.0
wd = 270
ti = 0.05
shear = 0.0
# Wake steering optimisation inputs
yaw_initial = np.full(shape=(n_row*n_col), fill_value=0)
inflow = (yaw_initial, wd, ws, ti, shear)
variables = [1, 2, 3, 4, 5, 6]
var_bounds = (-25, 25)
var_initial = np.full(shape=(len(variables)), fill_value=0)
# Initialise optimisation object
wso_obj = WSOpt(wf_model=wf_model,
inflow=inflow,
variables=variables,
var_bounds=var_bounds,
var_initial=var_initial,
opt_method="SLSQP",
opt_options=None,
obj_function="Farm Power",
constraints=(None, None, None),
by_row=(False, None, None),
tuning_dynamic=False
)
# Optimise, extract and print optimal yaw angles
opt_yaw_angles_vars, opt_yaw_angles_all = wso_obj.optimize_yaw()
print('Optimal farm yaw angles:')
print(opt_yaw_angles_all)
# Get optimisation details and print number of iterations and evaluations
iter_details, eval_details = wso_obj.get_optimization_details()
print('Number of optimiser iterations: %i' % (len(iter_details[0])))
print('Number of objective function evaluations: %i' % (len(eval_details[0])))
# Plot optimisation details and results
wso_optimal_yaw_angles(wso_obj, radius=1.2)
wso_optimal_flow_field(wso_obj)
wso_plot_details_iterations(wso_obj)
wso_plot_details_evaluations(wso_obj) | wso_explore_optimum_power_1var(wso_obj, turbine=5, yaw_bounds=(-25, 25), yaw_number=51) | 6 | 2023-11-10 18:13:27+00:00 | 12k |
CPES-Power-and-Energy-Systems/interoperable-recommender-tso | energy_app/packages/forecast-api/forecast_api/models/optimization/opt_algorithms/bayesian_opt/bayesian_optimization.py | [
{
"identifier": "GaussianProcess",
"path": "energy_app/packages/forecast-api/forecast_api/models/optimization/opt_algorithms/bayesian_opt/helpers.py",
"snippet": "class GaussianProcess(BaseEstimator, RegressorMixin):\n \"\"\"The legacy Gaussian Process model class.\n\n .. deprecated:: 0.18\n ... | import numpy as np
from .helpers import GaussianProcess
from scipy.optimize import minimize
from .helpers import UtilityFunction, unique_rows, PrintLog | 10,755 | res = minimize(lambda x: -ac(x.reshape(1, -1), gp=gp, y_max=y_max),
x_try.reshape(1, -1),
bounds=bounds,
method="L-BFGS-B")
# Store it if better than previous minimum(maximum).
if max_acq is None or -res.fun >= max_acq:
x_max = res.x
max_acq = -res.fun
# Clip output to make sure it lies within the bounds. Due to floating
# point technicalities this is not always the case.
return np.clip(x_max, bounds[:, 0], bounds[:, 1])
def matern52(theta, d):
"""
Matern 5/2 correlation model.::
theta, d --> r(theta, d) = (1+sqrt(5)*r + 5/3*r^2)*exp(-sqrt(5)*r) n
where r = sqrt(sum (d_i)^2 / (theta_i)^2 ) i = 1
Parameters
----------
theta : array_like
An array with shape 1 (isotropic) or n (anisotropic) giving the
autocorrelation parameter(s).
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) containing the values of the
autocorrelation modle.
"""
theta = np.asarray(theta, dtype=np.float)
d = np.asarray(d, dtype=np.float)
if d.ndim > 1:
n_features = d.shape[1]
else:
n_features = 1
if theta.size == 1:
r = np.sqrt(np.sum(d ** 2, axis=1)) / theta[0]
elif theta.size != n_features:
raise ValueError("Length of theta must be 1 or %s" % n_features)
else:
r = np.sqrt(np.sum(d ** 2 / theta.reshape(1, n_features) ** 2, axis=1))
return (1 + np.sqrt(5) * r + 5 / 3. * r ** 2) * np.exp(-np.sqrt(5) * r)
class BayesianOptimization(object):
def __init__(self, f, pbounds, verbose=1):
"""
:param f:
Function to be maximized.
:param pbounds:
Dictionary with parameters names as keys and a tuple with minimum
and maximum values.
:param verbose:
Whether or not to print progress.
"""
# Store the original dictionary
self.pbounds = pbounds
# Get the name of the parameters
self.keys = list(pbounds.keys())
# Find number of parameters
self.dim = len(pbounds)
# Create an array with parameters bounds
self.bounds = []
for key in self.pbounds.keys():
self.bounds.append(self.pbounds[key])
self.bounds = np.asarray(self.bounds)
# Some function to be optimized
self.f = f
# Initialization flag
self.initialized = False
# Initialization lists --- stores starting points before process begins
self.init_points = []
self.x_init = []
self.y_init = []
# Numpy array place holders
self.X = None
self.Y = None
# Counter of iterations
self.i = 0
# Since scipy 0.16 passing lower and upper bound to theta seems to be
# broken. However, there is a lot of development going on around GP
# is scikit-learn. So I'll pick the easy route here and simple specify
# only theta0.
self.gp = GaussianProcess(corr=matern52,
theta0=np.random.uniform(0.001, 0.05,
self.dim),
thetaL=1e-5 * np.ones(self.dim),
thetaU=1e0 * np.ones(self.dim),
random_start=30)
# Utility Function placeholder
self.util = None
# PrintLog object
| """
BAYESIAN OPTIMIZATION MODULE - Version 0.1.0
Created by Fernando Nogueira (fmfn). Available in
- https://github.com/fmfn/BayesianOptimization
"""
__author__ = 'fmfn'
def acq_max(ac, gp, y_max, bounds):
"""
A function to find the maximum of the acquisition function using
the 'L-BFGS-B' method.
Parameters
----------
:param ac:
The acquisition function object that return its point-wise value.
:param gp:
A gaussian process fitted to the relevant data.
:param y_max:
The current maximum known value of the target function.
:param bounds:
The variables bounds to limit the search of the acq max.
Returns
-------
:return: x_max, The arg max of the acquisition function.
"""
# Start with the lower bound as the argmax
x_max = bounds[:, 0]
max_acq = None
x_tries = np.random.uniform(bounds[:, 0], bounds[:, 1],
size=(100, bounds.shape[0]))
for x_try in x_tries:
# Find the minimum of minus the acquisition function
res = minimize(lambda x: -ac(x.reshape(1, -1), gp=gp, y_max=y_max),
x_try.reshape(1, -1),
bounds=bounds,
method="L-BFGS-B")
# Store it if better than previous minimum(maximum).
if max_acq is None or -res.fun >= max_acq:
x_max = res.x
max_acq = -res.fun
# Clip output to make sure it lies within the bounds. Due to floating
# point technicalities this is not always the case.
return np.clip(x_max, bounds[:, 0], bounds[:, 1])
def matern52(theta, d):
"""
Matern 5/2 correlation model.::
theta, d --> r(theta, d) = (1+sqrt(5)*r + 5/3*r^2)*exp(-sqrt(5)*r) n
where r = sqrt(sum (d_i)^2 / (theta_i)^2 ) i = 1
Parameters
----------
theta : array_like
An array with shape 1 (isotropic) or n (anisotropic) giving the
autocorrelation parameter(s).
d : array_like
An array with shape (n_eval, n_features) giving the componentwise
distances between locations x and x' at which the correlation model
should be evaluated.
Returns
-------
r : array_like
An array with shape (n_eval, ) containing the values of the
autocorrelation modle.
"""
theta = np.asarray(theta, dtype=np.float)
d = np.asarray(d, dtype=np.float)
if d.ndim > 1:
n_features = d.shape[1]
else:
n_features = 1
if theta.size == 1:
r = np.sqrt(np.sum(d ** 2, axis=1)) / theta[0]
elif theta.size != n_features:
raise ValueError("Length of theta must be 1 or %s" % n_features)
else:
r = np.sqrt(np.sum(d ** 2 / theta.reshape(1, n_features) ** 2, axis=1))
return (1 + np.sqrt(5) * r + 5 / 3. * r ** 2) * np.exp(-np.sqrt(5) * r)
class BayesianOptimization(object):
def __init__(self, f, pbounds, verbose=1):
"""
:param f:
Function to be maximized.
:param pbounds:
Dictionary with parameters names as keys and a tuple with minimum
and maximum values.
:param verbose:
Whether or not to print progress.
"""
# Store the original dictionary
self.pbounds = pbounds
# Get the name of the parameters
self.keys = list(pbounds.keys())
# Find number of parameters
self.dim = len(pbounds)
# Create an array with parameters bounds
self.bounds = []
for key in self.pbounds.keys():
self.bounds.append(self.pbounds[key])
self.bounds = np.asarray(self.bounds)
# Some function to be optimized
self.f = f
# Initialization flag
self.initialized = False
# Initialization lists --- stores starting points before process begins
self.init_points = []
self.x_init = []
self.y_init = []
# Numpy array place holders
self.X = None
self.Y = None
# Counter of iterations
self.i = 0
# Since scipy 0.16 passing lower and upper bound to theta seems to be
# broken. However, there is a lot of development going on around GP
# is scikit-learn. So I'll pick the easy route here and simple specify
# only theta0.
self.gp = GaussianProcess(corr=matern52,
theta0=np.random.uniform(0.001, 0.05,
self.dim),
thetaL=1e-5 * np.ones(self.dim),
thetaU=1e0 * np.ones(self.dim),
random_start=30)
# Utility Function placeholder
self.util = None
# PrintLog object | self.plog = PrintLog(self.keys) | 3 | 2023-11-17 09:23:38+00:00 | 12k |
PlaxtonFlarion/NexaFlow | nexaflow/cutter/cutter.py | [
{
"identifier": "toolbox",
"path": "nexaflow/toolbox.py",
"snippet": "def video_capture(video_path: str):\ndef video_jump(video_cap: cv2.VideoCapture, frame_id: int):\ndef compare_ssim(pic1: np.ndarray, pic2: np.ndarray) -> float:\ndef multi_compare_ssim(\n pic1_list: typing.List, pic2_list: typing.L... | import os
import time
import typing
import numpy as np
from loguru import logger
from typing import List, Tuple
from concurrent.futures import ThreadPoolExecutor
from nexaflow import toolbox
from nexaflow.cutter.cut_range import VideoCutRange
from nexaflow.cutter.cut_result import VideoCutResult
from nexaflow.video import VideoObject, VideoFrame
from nexaflow.hook import BaseHook, GreyHook, CompressHook | 8,679 |
class Window(object):
def __init__(self, video: "VideoObject", *args):
self.video = video
assert len(args) == 7, "需要7个参数"
(self.step, self.block, self.window_size, self.window_coefficient,
self.start, self.video_length, self.frame_total) = args
self.end = self.start + self.window_size * self.step
def load_data(self) -> typing.List[VideoFrame]:
cur = self.start
result = []
video_operator = self.video.get_operator()
while cur <= self.end:
frame = video_operator.get_frame_by_id(cur)
result.append(frame)
cur += self.step
if len(result) < 2:
last = video_operator.get_frame_by_id(self.end)
result.append(last)
return result
def shift(self) -> bool:
self.start += self.step
self.end += self.step
if self.start >= self.video_length:
return False
if self.end >= self.video_length:
self.end = self.video_length
return True
def float_merge(self, float_list: typing.List[float]) -> float:
length = len(float_list)
result = 0.0
denominator = 0.0
for i, each in enumerate(float_list):
weight = pow(length - i, self.window_coefficient)
denominator += weight
result += each * weight
final = result / denominator
return final
class VideoCutter(object):
def __init__(
self,
step: int = None,
compress_rate: float = None,
target_size: typing.Tuple[int, int] = None,
):
self.step = step or 1
if (not compress_rate) and (not target_size):
# logger.debug(
# f"no compress rate or target size received. set compress rate to 0.2"
# )
compress_rate = 0.2
self._hook_list: typing.List[BaseHook] = list()
compress_hook = CompressHook(
overwrite=True, compress_rate=compress_rate, target_size=target_size
)
|
class Window(object):
    """A stride-based frame window that can be walked across a video."""

    def __init__(self, video: "VideoObject", *args):
        self.video = video
        assert len(args) == 7, "需要7个参数"
        (self.step, self.block, self.window_size, self.window_coefficient,
         self.start, self.video_length, self.frame_total) = args
        self.end = self.start + self.window_size * self.step

    def load_data(self) -> typing.List[VideoFrame]:
        """Collect every covered frame; pad with the end frame if fewer than two."""
        operator = self.video.get_operator()
        frames = []
        frame_id = self.start
        while frame_id <= self.end:
            frames.append(operator.get_frame_by_id(frame_id))
            frame_id += self.step
        if len(frames) < 2:
            frames.append(operator.get_frame_by_id(self.end))
        return frames

    def shift(self) -> bool:
        """Move the window forward one step; False once the video is exhausted."""
        self.start += self.step
        self.end += self.step
        if self.start >= self.video_length:
            return False
        self.end = min(self.end, self.video_length)
        return True

    def float_merge(self, float_list: typing.List[float]) -> float:
        """Weighted average where position i carries weight (len - i) ** coefficient."""
        size = len(float_list)
        weights = [pow(size - idx, self.window_coefficient) for idx in range(size)]
        total = sum(value * weight for value, weight in zip(float_list, weights))
        return total / sum(weights)
class VideoCutter(object):
def __init__(
self,
step: int = None,
compress_rate: float = None,
target_size: typing.Tuple[int, int] = None,
):
self.step = step or 1
if (not compress_rate) and (not target_size):
# logger.debug(
# f"no compress rate or target size received. set compress rate to 0.2"
# )
compress_rate = 0.2
self._hook_list: typing.List[BaseHook] = list()
compress_hook = CompressHook(
overwrite=True, compress_rate=compress_rate, target_size=target_size
) | grey_hook = GreyHook(overwrite=True) | 6 | 2023-11-13 05:27:34+00:00 | 12k |
OpenBMB/XAgent | XAgentServer/application/cruds/interaction.py | [
{
"identifier": "InteractionDBInterface",
"path": "XAgentServer/database/interface/interaction.py",
"snippet": "class InteractionDBInterface(metaclass=abc.ABCMeta):\n \"\"\"Interaction DB Interface\n \"\"\"\n\n @classmethod\n def search_many_interaction(cls, db: Session) -> list[InteractionB... | import abc
import uuid
from datetime import datetime
from typing import List
from sqlalchemy.orm import Session
from XAgentServer.database.interface.interaction import InteractionDBInterface
from XAgentServer.exts.exception_ext import XAgentDBError
from XAgentServer.models.interaction import InteractionBase
from XAgentServer.database.models import Raw
from XAgentServer.models.parameter import InteractionParameter
from XAgentServer.models.raw import XAgentRaw | 8,430 | @classmethod
def delete_interaction(cls, db: Session, interaction_id: str):
"""
delete interaction
Args:
db: db
interaction_id: interaction id
Raises:
XAgentDBError: XAgent DB Error
"""
try:
InteractionDBInterface.delete_interaction(
db=db, interaction_id=interaction_id)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def get_shared_interaction(cls,
db: Session,
interaction_id: str) -> InteractionBase | None:
"""
get shared interaction
Args:
db: db
interaction_id: interaction id
Returns:
interaction InteractionBase, if not found, return None
Raises:
XAgentDBError: XAgent DB Error
"""
try:
return InteractionDBInterface.get_shared_interaction(
db=db,
interaction_id=interaction_id)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def search_many_shared(cls,
db: Session,
page_size: int = 20,
page_index: int = 1) -> list[dict]:
"""
search many shared
Args:
db: db
page_size: page size
page_index: page index
Returns:
interaction list [dict]
Raises:
XAgentDBError: XAgent DB Error
"""
try:
return InteractionDBInterface.search_many_shared(db=db,
page_size=page_size,
page_index=page_index)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def insert_raw(cls, db: Session, process: XAgentRaw):
"""
insert raw
Args:
db: db
process: process
Raises:
XAgentDBError: XAgent DB Error
"""
try:
InteractionDBInterface.insert_raw(db=db, process=process)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def search_many_raws(cls, db: Session, interaction_id: str) -> List[XAgentRaw] | None:
"""
search many raws
Args:
db: db
interaction_id: interaction id
Returns:
raw list [XAgentRaw]
Raises:
XAgentDBError: XAgent DB Error
"""
try:
return [XAgentRaw.from_db(raw) for raw in
InteractionDBInterface.search_many_raws(db=db, interaction_id=interaction_id)]
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def get_raw(cls, db: Session, interaction_id: str, node_id: str) -> XAgentRaw | None:
"""
get raw
Args:
db: db
interaction_id: interaction id
node_id: node id
Returns:
raw XAgentRaw, if not found, return None
Raises:
XAgentDBError: XAgent DB Error
"""
try:
return InteractionDBInterface.get_raw(db=db,
interaction_id=interaction_id,
node_id=node_id)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
| """XAgentServer application cruds interaction module."""
class InteractionCRUD(metaclass=abc.ABCMeta):
"""
interaction crud
"""
@classmethod
def search_many_interaction(cls, db: Session) -> list:
"""
search many interaction
"""
try:
return InteractionDBInterface.search_many_interaction(db=db)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def get_interaction(cls, db: Session, interaction_id: str) -> InteractionBase | None:
"""
get interaction
Args:
db: db
interaction_id: interaction id
Returns:
interaction InteractionBase
Raises:
XAgentDBError: XAgent DB Error
"""
try:
return InteractionDBInterface.get_interaction(db=db, interaction_id=interaction_id)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def create_interaction(cls, db: Session, base: InteractionBase):
"""
create interaction
Args:
db: db
base: base
Raises:
XAgentDBError: XAgent DB Error
"""
try:
InteractionDBInterface.create_interaction(db=db, base=base)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def get_ready_interaction(cls, db: Session, user_id: str):
"""
create interaction
Args:
db: db
user_id: user_id
Raises:
XAgentDBError: XAgent DB Error
"""
try:
return InteractionDBInterface.get_ready_interaction(db=db, user_id=user_id)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def add_parameter(cls, db: Session, parameter: InteractionParameter = None):
"""
add parameter
Args:
db: db
parameter: parameter
Raises:
XAgentDBError: XAgent DB Error
"""
try:
InteractionDBInterface.add_parameter(db=db, parameter=parameter)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def get_parameter(cls, db: Session, interaction_id: str) -> list:
"""
get parameter
Args:
db: db
interaction_id: interaction id
Returns:
parameter list [InteractionParameter]
Raises:
XAgentDBError: XAgent DB Error
"""
try:
return InteractionDBInterface.get_parameter(db=db, interaction_id=interaction_id)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def get_init_parameter(cls, db: Session, interaction_id: str) -> InteractionParameter:
"""
get init parameter
Args:
db: db
interaction_id: interaction id
Returns:
parameter InteractionParameter
Raises:
XAgentDBError: XAgent DB Error
"""
try:
parameters = InteractionDBInterface.get_parameter(db=db, interaction_id=interaction_id)
init_parameter = parameters[0]
parameter = InteractionParameter.from_json({"args": init_parameter, "interaction_id": interaction_id, "parameter_id": None})
return parameter
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def search_interaction_by_user_id(cls,
db: Session,
user_id: str,
page_size: int = 10,
page_num: int = 1) -> list[dict]:
"""
get interaction by user id
Args:
db: db
user_id: user id
page_size: page size
page_num: page num
Returns:
interaction list [dict]
Raises:
XAgentDBError: XAgent DB Error
"""
return InteractionDBInterface.search_interaction_by_user_id(db=db,
user_id=user_id,
page_size=page_size,
page_num=page_num)
@classmethod
def is_exist(cls, db: Session, interaction_id: str) -> bool:
"""
interaction is exist
Args:
db: db
interaction_id: interaction id
Returns:
True if interaction is exist, else False
Raises:
XAgentDBError: XAgent DB Error
"""
try:
return InteractionDBInterface.is_exist(db=db, interaction_id=interaction_id)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def update_interaction(cls, db: Session, base_data: dict):
"""
update interaction
Args:
db: db
base_data: base data
Raises:
XAgentDBError: XAgent DB Error
"""
try:
InteractionDBInterface.update_interaction(db=db, base_data=base_data)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def update_interaction_status(cls,
db: Session,
interaction_id: str,
status: str,
message: str,
current_step: int):
"""
update interaction status
Args:
db: db
interaction_id: interaction id
status: status
message: message
current_step: current step
Raises:
XAgentDBError: XAgent DB Error
"""
try:
InteractionDBInterface.update_interaction_status(
db=db,
interaction_id=interaction_id,
status=status,
message=message,
current_step=current_step)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def update_interaction_parameter(cls,
db: Session,
interaction_id: str,
parameter: InteractionParameter):
"""
update interaction parameter
Args:
db: db
interaction_id: interaction id
parameter: parameter
Raises:
XAgentDBError: XAgent DB Error
"""
try:
InteractionDBInterface.update_interaction_parameter(
db=db,
interaction_id=interaction_id,
parameter=parameter)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def is_running(cls, db: Session, user_id: str):
"""
is running
Args:
db: db
user_id: user id
Returns:
True if running, else False
Raises:
XAgentDBError: XAgent DB Error
"""
try:
return InteractionDBInterface.is_running(db=db, user_id=user_id)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def delete_interaction(cls, db: Session, interaction_id: str):
"""
delete interaction
Args:
db: db
interaction_id: interaction id
Raises:
XAgentDBError: XAgent DB Error
"""
try:
InteractionDBInterface.delete_interaction(
db=db, interaction_id=interaction_id)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def get_shared_interaction(cls,
db: Session,
interaction_id: str) -> InteractionBase | None:
"""
get shared interaction
Args:
db: db
interaction_id: interaction id
Returns:
interaction InteractionBase, if not found, return None
Raises:
XAgentDBError: XAgent DB Error
"""
try:
return InteractionDBInterface.get_shared_interaction(
db=db,
interaction_id=interaction_id)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def search_many_shared(cls,
db: Session,
page_size: int = 20,
page_index: int = 1) -> list[dict]:
"""
search many shared
Args:
db: db
page_size: page size
page_index: page index
Returns:
interaction list [dict]
Raises:
XAgentDBError: XAgent DB Error
"""
try:
return InteractionDBInterface.search_many_shared(db=db,
page_size=page_size,
page_index=page_index)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def insert_raw(cls, db: Session, process: XAgentRaw):
"""
insert raw
Args:
db: db
process: process
Raises:
XAgentDBError: XAgent DB Error
"""
try:
InteractionDBInterface.insert_raw(db=db, process=process)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def search_many_raws(cls, db: Session, interaction_id: str) -> List[XAgentRaw] | None:
"""
search many raws
Args:
db: db
interaction_id: interaction id
Returns:
raw list [XAgentRaw]
Raises:
XAgentDBError: XAgent DB Error
"""
try:
return [XAgentRaw.from_db(raw) for raw in
InteractionDBInterface.search_many_raws(db=db, interaction_id=interaction_id)]
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod
def get_raw(cls, db: Session, interaction_id: str, node_id: str) -> XAgentRaw | None:
"""
get raw
Args:
db: db
interaction_id: interaction id
node_id: node id
Returns:
raw XAgentRaw, if not found, return None
Raises:
XAgentDBError: XAgent DB Error
"""
try:
return InteractionDBInterface.get_raw(db=db,
interaction_id=interaction_id,
node_id=node_id)
except Exception as e:
raise XAgentDBError(f"XAgent DB Error [Interact Module]: {str(e)}") from e
@classmethod | def get_next_send(cls, db: Session, interaction_id: str) -> List[Raw] | None: | 3 | 2023-10-16 03:44:57+00:00 | 12k |
PKU-YuanGroup/Video-LLaVA | llava/model/multimodal_encoder/languagebind/video/modeling_video.py | [
{
"identifier": "LanguageBindVideoConfig",
"path": "llava/model/multimodal_encoder/languagebind/video/configuration_video.py",
"snippet": "class LanguageBindVideoConfig(PretrainedConfig):\n r\"\"\"\n [`CLIPConfig`] is the configuration class to store the configuration of a [`CLIPModel`]. It is use... | import math
import torch
from typing import Optional, Tuple, Union
from einops import rearrange
from peft import LoraConfig, get_peft_model
from torch import nn
from torch.nn import functional as F
from transformers import PreTrainedModel, add_start_docstrings
from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from transformers.models.clip.modeling_clip import CLIPMLP, CLIPAttention, CLIPTextEmbeddings, CLIPVisionEmbeddings, \
CLIPVisionModelWithProjection, CLIPTextModelWithProjection, _expand_mask, CLIPOutput, clip_loss
from transformers.utils import add_start_docstrings_to_model_forward, replace_return_docstrings
from .configuration_video import LanguageBindVideoConfig, CLIPVisionConfig, CLIPTextConfig | 7,852 | Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`CLIPEncoderLayer`].
Args:
config: CLIPConfig
"""
    def __init__(self, config: LanguageBindVideoConfig):
        """Build the stack of ``config.num_hidden_layers`` CLIPEncoderLayer blocks."""
        super().__init__()
        self.config = config
        # One CLIPEncoderLayer per depth; iteration order defines the forward pass.
        self.layers = nn.ModuleList([CLIPEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        # Toggled externally (see CLIPPreTrainedModel._set_gradient_checkpointing).
        self.gradient_checkpointing = False
    def forward(
        self,
        inputs_embeds,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Run the embedded sequence through every encoder layer.

        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Pre-embedded inputs (no embedding lookup happens here).
            attention_mask (`torch.Tensor`, *optional*):
                Additive padding mask; 1 = keep, 0 = masked.
            causal_attention_mask (`torch.Tensor`, *optional*):
                Additive causal mask for autoregressive text encoding.
            output_attentions (`bool`, *optional*):
                Collect per-layer attention weights.
            output_hidden_states (`bool`, *optional*):
                Collect the hidden state before every layer plus the final one.
            return_dict (`bool`, *optional*):
                Return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        # Per-call flags override the config-level defaults.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_states = inputs_embeds
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                # Record the state *entering* this layer; the final state is added after the loop.
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                # Closure captures output_attentions so checkpoint() only re-plays tensor args.
                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs, output_attentions)

                    return custom_forward

                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(encoder_layer),
                    hidden_states,
                    attention_mask,
                    causal_attention_mask,
                )
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                    attention_mask,
                    causal_attention_mask,
                    output_attentions=output_attentions,
                )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            # Tuple form drops the None entries entirely.
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(
input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
):
"""
Make causal mask used for bi-directional self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
mask_cond = torch.arange(mask.size(-1), device=device)
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
class CLIPTextTransformer(nn.Module):
|
class PatchDropout(nn.Module):
    """
    Randomly keeps only a subset of patch tokens during training
    (https://arxiv.org/abs/2212.00794); a no-op at eval time or when prob == 0.
    """

    def __init__(self, prob, exclude_first_token=True):
        super().__init__()
        assert 0 <= prob < 1.
        self.prob = prob
        self.exclude_first_token = exclude_first_token  # exclude CLS token

    def forward(self, x, B, T):
        if not self.training or self.prob == 0.:
            return x

        if self.exclude_first_token:
            cls_tokens, x = x[:, :1], x[:, 1:]
        else:
            cls_tokens = torch.jit.annotate(torch.Tensor, x[:, :1])

        batch, num_tokens = x.size()[0], x.size()[1]
        row_index = torch.arange(batch)[..., None]

        num_patches_keep = max(1, int(num_tokens * (1 - self.prob)))

        if T == 1:
            scores = torch.randn(batch, num_tokens)
            keep_cols = scores.topk(num_patches_keep, dim=-1).indices
        else:
            # One keep-pattern per video, replicated across its T frames.
            scores = torch.randn(B, num_tokens)
            keep_cols = scores.topk(num_patches_keep, dim=-1).indices
            keep_cols = keep_cols.unsqueeze(1).repeat(1, T, 1)
            keep_cols = rearrange(keep_cols, 'b t n -> (b t) n')

        x = x[row_index, keep_cols]

        if self.exclude_first_token:
            x = torch.cat((cls_tokens, x), dim=1)

        return x
class CLIPEncoderLayer(nn.Module):
    """Pre-norm transformer block with an optional temporal-attention branch.

    When ``config.add_time_attn`` is set, the layer first applies self-attention
    along the time axis (frames of the same spatial token attend to each other),
    then the usual CLIP spatial self-attention + MLP. Hidden states are laid out
    as ``(batch * time, tokens, dim)`` — grounded by the einops patterns below.
    """

    def __init__(self, config: LanguageBindVideoConfig):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.self_attn = CLIPAttention(config)
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = CLIPMLP(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)

        self.add_time_attn = config.add_time_attn
        if self.add_time_attn:
            self.t = config.num_frames
            # Learned per-frame position embedding, added before temporal attention.
            self.temporal_embedding = nn.Parameter(torch.zeros(1, config.num_frames, config.hidden_size))
            nn.init.normal_(self.temporal_embedding, std=config.hidden_size ** -0.5)

            self.embed_dim = config.hidden_size
            self.temporal_attn = CLIPAttention(config)
            self.temporal_layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
            # Temporal MLP sub-block deliberately disabled; kept for reference:
            # self.temporal_mlp = CLIPMLP(config)
            # self.temporal_layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        causal_attention_mask: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
                `(config.encoder_attention_heads,)`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        if self.add_time_attn:
            bt, n, d = hidden_states.shape
            t = self.t

            # time embed
            if t != 1:
                n = hidden_states.shape[1]
                # Put the time axis on the sequence dimension, add the learned
                # frame embedding, then restore the (b t) n d layout.
                hidden_states = rearrange(hidden_states, '(b t) n d -> (b n) t d', t=t)
                hidden_states = hidden_states + self.temporal_embedding[:, :t, :]
                hidden_states = rearrange(hidden_states, '(b n) t d -> (b t) n d', n=n)

            # time attn
            residual = hidden_states
            hidden_states = rearrange(hidden_states, '(b t) n d -> (b n) t d', t=t)
            # hidden_states = self.layer_norm1(hidden_states) # share layernorm
            hidden_states = self.temporal_layer_norm1(hidden_states)
            hidden_states, attn_weights = self.temporal_attn(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                causal_attention_mask=causal_attention_mask,
                output_attentions=output_attentions,
            )
            # Residual is applied after restoring the (batch*time, tokens, dim) layout.
            hidden_states = residual + rearrange(hidden_states, '(b n) t d -> (b t) n d', n=n)

            # residual = hidden_states
            # hidden_states = rearrange(hidden_states, '(b t) n d -> (b n) t d', t=t)
            # # hidden_states = self.layer_norm2(hidden_states) # share layernorm
            # hidden_states = self.temporal_layer_norm2(hidden_states)
            # hidden_states = self.temporal_mlp(hidden_states)
            # hidden_states = residual + rearrange(hidden_states, '(b n) t d -> (b t) n d', n=n)

        # spatial attn
        residual = hidden_states
        hidden_states = self.layer_norm1(hidden_states)
        hidden_states, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
        )
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            # NOTE(review): with add_time_attn on, attn_weights here come from the
            # spatial attention only — the temporal weights are overwritten above.
            outputs += (attn_weights,)

        return outputs
class CLIPPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = LanguageBindVideoConfig
    base_model_prefix = "clip"
    supports_gradient_checkpointing = True
    # position_ids buffers are rebuilt at load time, so missing keys are expected.
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def _init_weights(self, module):
        """Initialize the weights"""
        # Per-module scaled-normal init; std formulas follow the CLIP scheme where
        # projections are scaled by 1/sqrt(width) (and 1/sqrt(2*depth) for residual paths).
        factor = self.config.initializer_factor
        if isinstance(module, CLIPTextEmbeddings):
            module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
            module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
        elif isinstance(module, CLIPVisionEmbeddings):
            factor = self.config.initializer_factor
            nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor)
            nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor)
            nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor)
        elif isinstance(module, CLIPAttention):
            factor = self.config.initializer_factor
            in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
            out_proj_std = (module.embed_dim**-0.5) * factor
            nn.init.normal_(module.q_proj.weight, std=in_proj_std)
            nn.init.normal_(module.k_proj.weight, std=in_proj_std)
            nn.init.normal_(module.v_proj.weight, std=in_proj_std)
            nn.init.normal_(module.out_proj.weight, std=out_proj_std)
        elif isinstance(module, CLIPMLP):
            factor = self.config.initializer_factor
            in_proj_std = (
                (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
            )
            fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
            nn.init.normal_(module.fc1.weight, std=fc_std)
            nn.init.normal_(module.fc2.weight, std=in_proj_std)
        elif isinstance(module, LanguageBindVideo):
            nn.init.normal_(
                module.text_projection.weight,
                std=module.text_embed_dim**-0.5 * self.config.initializer_factor,
            )
            nn.init.normal_(
                module.visual_projection.weight,
                std=module.vision_embed_dim**-0.5 * self.config.initializer_factor,
            )
        elif isinstance(module, CLIPVisionModelWithProjection):
            nn.init.normal_(
                module.visual_projection.weight,
                std=self.config.hidden_size**-0.5 * self.config.initializer_factor,
            )
        elif isinstance(module, CLIPTextModelWithProjection):
            nn.init.normal_(
                module.text_projection.weight,
                std=self.config.hidden_size**-0.5 * self.config.initializer_factor,
            )

        # LayerNorm / Linear biases are zeroed regardless of the branch taken above.
        if isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()

    def _set_gradient_checkpointing(self, module, value=False):
        # Only CLIPEncoder instances carry the gradient_checkpointing flag.
        if isinstance(module, CLIPEncoder):
            module.gradient_checkpointing = value
CLIP_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`CLIPConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
CLIP_TEXT_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
CLIP_VISION_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
CLIP_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
return_loss (`bool`, *optional*):
Whether or not to return the contrastive loss.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
class CLIPEncoder(nn.Module):
"""
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`CLIPEncoderLayer`].
Args:
config: CLIPConfig
"""
    def __init__(self, config: LanguageBindVideoConfig):
        """Build the stack of ``config.num_hidden_layers`` CLIPEncoderLayer blocks."""
        super().__init__()
        self.config = config
        # One CLIPEncoderLayer per depth; iteration order defines the forward pass.
        self.layers = nn.ModuleList([CLIPEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        # Toggled externally (see CLIPPreTrainedModel._set_gradient_checkpointing).
        self.gradient_checkpointing = False
def forward(
self,
inputs_embeds,
attention_mask: Optional[torch.Tensor] = None,
causal_attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutput]:
r"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Causal mask for the text model. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
hidden_states = inputs_embeds
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if self.gradient_checkpointing and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(encoder_layer),
hidden_states,
attention_mask,
causal_attention_mask,
)
else:
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
causal_attention_mask,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(
input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
):
"""
Make causal mask used for bi-directional self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
mask_cond = torch.arange(mask.size(-1), device=device)
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
class CLIPTextTransformer(nn.Module): | def __init__(self, config: CLIPTextConfig): | 2 | 2023-10-23 05:43:54+00:00 | 12k |
deepseek-ai/DreamCraft3D | extern/ldm_zero123/models/diffusion/classifier.py | [
{
"identifier": "EncoderUNetModel",
"path": "extern/ldm_zero123/modules/diffusionmodules/openaimodel.py",
"snippet": "class EncoderUNetModel(nn.Module):\n \"\"\"\n The half UNet model with attention and timestep embedding.\n For usage, see UNet.\n \"\"\"\n\n def __init__(\n self,\n... | import os
import pytorch_lightning as pl
import torch
from copy import deepcopy
from glob import glob
from einops import rearrange
from natsort import natsorted
from omegaconf import OmegaConf
from torch.nn import functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from extern.ldm_zero123.modules.diffusionmodules.openaimodel import (
EncoderUNetModel,
UNetModel,
)
from extern.ldm_zero123.util import (
default,
instantiate_from_config,
ismap,
log_txt_as_img,
) | 7,969 | def compute_top_k(self, logits, labels, k, reduction="mean"):
_, top_ks = torch.topk(logits, k, dim=1)
if reduction == "mean":
return (top_ks == labels[:, None]).float().sum(dim=-1).mean().item()
elif reduction == "none":
return (top_ks == labels[:, None]).float().sum(dim=-1)
def on_train_epoch_start(self):
# save some memory
self.diffusion_model.model.to("cpu")
@torch.no_grad()
def write_logs(self, loss, logits, targets):
log_prefix = "train" if self.training else "val"
log = {}
log[f"{log_prefix}/loss"] = loss.mean()
log[f"{log_prefix}/acc@1"] = self.compute_top_k(
logits, targets, k=1, reduction="mean"
)
log[f"{log_prefix}/acc@5"] = self.compute_top_k(
logits, targets, k=5, reduction="mean"
)
self.log_dict(
log, prog_bar=False, logger=True, on_step=self.training, on_epoch=True
)
self.log("loss", log[f"{log_prefix}/loss"], prog_bar=True, logger=False)
self.log(
"global_step", self.global_step, logger=False, on_epoch=False, prog_bar=True
)
lr = self.optimizers().param_groups[0]["lr"]
self.log("lr_abs", lr, on_step=True, logger=True, on_epoch=False, prog_bar=True)
def shared_step(self, batch, t=None):
x, *_ = self.diffusion_model.get_input(
batch, k=self.diffusion_model.first_stage_key
)
targets = self.get_conditioning(batch)
if targets.dim() == 4:
targets = targets.argmax(dim=1)
if t is None:
t = torch.randint(
0, self.diffusion_model.num_timesteps, (x.shape[0],), device=self.device
).long()
else:
t = torch.full(size=(x.shape[0],), fill_value=t, device=self.device).long()
x_noisy = self.get_x_noisy(x, t)
logits = self(x_noisy, t)
loss = F.cross_entropy(logits, targets, reduction="none")
self.write_logs(loss.detach(), logits.detach(), targets.detach())
loss = loss.mean()
return loss, logits, x_noisy, targets
def training_step(self, batch, batch_idx):
loss, *_ = self.shared_step(batch)
return loss
def reset_noise_accs(self):
self.noisy_acc = {
t: {"acc@1": [], "acc@5": []}
for t in range(
0, self.diffusion_model.num_timesteps, self.diffusion_model.log_every_t
)
}
def on_validation_start(self):
self.reset_noise_accs()
@torch.no_grad()
def validation_step(self, batch, batch_idx):
loss, *_ = self.shared_step(batch)
for t in self.noisy_acc:
_, logits, _, targets = self.shared_step(batch, t)
self.noisy_acc[t]["acc@1"].append(
self.compute_top_k(logits, targets, k=1, reduction="mean")
)
self.noisy_acc[t]["acc@5"].append(
self.compute_top_k(logits, targets, k=5, reduction="mean")
)
return loss
def configure_optimizers(self):
optimizer = AdamW(
self.model.parameters(),
lr=self.learning_rate,
weight_decay=self.weight_decay,
)
if self.use_scheduler:
scheduler = instantiate_from_config(self.scheduler_config)
print("Setting up LambdaLR scheduler...")
scheduler = [
{
"scheduler": LambdaLR(optimizer, lr_lambda=scheduler.schedule),
"interval": "step",
"frequency": 1,
}
]
return [optimizer], scheduler
return optimizer
@torch.no_grad()
def log_images(self, batch, N=8, *args, **kwargs):
log = dict()
x = self.get_input(batch, self.diffusion_model.first_stage_key)
log["inputs"] = x
y = self.get_conditioning(batch)
if self.label_key == "class_label":
y = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"])
log["labels"] = y
|
# Maps the conditioning key to the classifier backbone used for it
# (a pooled half-UNet for class labels, a full UNet for segmentation targets).
__models__ = {"class_label": EncoderUNetModel, "segmentation": UNetModel}
def disabled_train(self, mode=True):
    """No-op replacement for ``nn.Module.train``.

    Bound over a frozen module's ``train`` attribute so that subsequent
    ``.train()`` / ``.eval()`` calls can no longer flip its mode; the module
    is returned unchanged.
    """
    return self
class NoisyLatentImageClassifier(pl.LightningModule):
    def __init__(
        self,
        diffusion_path,
        num_classes,
        ckpt_path=None,
        pool="attention",
        label_key=None,
        diffusion_ckpt_path=None,
        scheduler_config=None,
        weight_decay=1.0e-2,
        log_steps=10,
        monitor="val/loss",
        *args,
        **kwargs,
    ):
        """Classifier trained on noisy latents of a frozen diffusion model.

        Args:
            diffusion_path: run directory containing ``configs/*-project.yaml``;
                the naturally-sorted newest config defines the diffusion backbone.
            num_classes: number of classifier output classes.
            ckpt_path: optional checkpoint to restore the classifier from.
            pool: pooling mode, only used for the "class_label" variant.
            label_key: conditioning key; overridden by the diffusion model's
                ``cond_stage_key`` attribute when present.
            diffusion_ckpt_path: checkpoint path injected into the diffusion config.
            scheduler_config: optional LR scheduler config; enables scheduling.
            weight_decay: AdamW weight decay.
            log_steps: number of timestep buckets for noisy-accuracy logging.
            monitor: metric name for checkpoint callbacks.
        """
        super().__init__(*args, **kwargs)
        self.num_classes = num_classes
        # get latest config of diffusion model
        diffusion_config = natsorted(
            glob(os.path.join(diffusion_path, "configs", "*-project.yaml"))
        )[-1]
        self.diffusion_config = OmegaConf.load(diffusion_config).model
        self.diffusion_config.params.ckpt_path = diffusion_ckpt_path
        self.load_diffusion()
        self.monitor = monitor
        # Number of downsampling steps of the first-stage encoder.
        self.numd = self.diffusion_model.first_stage_model.encoder.num_resolutions - 1
        self.log_time_interval = self.diffusion_model.num_timesteps // log_steps
        self.log_steps = log_steps
        # Prefer the diffusion model's conditioning key when it declares one.
        self.label_key = (
            label_key
            if not hasattr(self.diffusion_model, "cond_stage_key")
            else self.diffusion_model.cond_stage_key
        )
        assert (
            self.label_key is not None
        ), "label_key neither in diffusion model nor in model.params"
        if self.label_key not in __models__:
            raise NotImplementedError()
        self.load_classifier(ckpt_path, pool)
        self.scheduler_config = scheduler_config
        self.use_scheduler = self.scheduler_config is not None
        self.weight_decay = weight_decay
def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
sd = torch.load(path, map_location="cpu")
if "state_dict" in list(sd.keys()):
sd = sd["state_dict"]
keys = list(sd.keys())
for k in keys:
for ik in ignore_keys:
if k.startswith(ik):
print("Deleting key {} from state_dict.".format(k))
del sd[k]
missing, unexpected = (
self.load_state_dict(sd, strict=False)
if not only_model
else self.model.load_state_dict(sd, strict=False)
)
print(
f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys"
)
if len(missing) > 0:
print(f"Missing Keys: {missing}")
if len(unexpected) > 0:
print(f"Unexpected Keys: {unexpected}")
def load_diffusion(self):
model = instantiate_from_config(self.diffusion_config)
self.diffusion_model = model.eval()
self.diffusion_model.train = disabled_train
for param in self.diffusion_model.parameters():
param.requires_grad = False
def load_classifier(self, ckpt_path, pool):
model_config = deepcopy(self.diffusion_config.params.unet_config.params)
model_config.in_channels = (
self.diffusion_config.params.unet_config.params.out_channels
)
model_config.out_channels = self.num_classes
if self.label_key == "class_label":
model_config.pool = pool
self.model = __models__[self.label_key](**model_config)
if ckpt_path is not None:
print(
"#####################################################################"
)
print(f'load from ckpt "{ckpt_path}"')
print(
"#####################################################################"
)
self.init_from_ckpt(ckpt_path)
    @torch.no_grad()
    def get_x_noisy(self, x, t, noise=None):
        """Diffuse clean latents *x* to timestep *t* via the frozen model's q_sample.

        *noise* defaults to fresh standard Gaussian noise shaped like *x*. When
        the diffusion model uses continuous noise levels, a per-sample
        sqrt-alpha-cumprod level is sampled as well.
        """
        noise = default(noise, lambda: torch.randn_like(x))
        continuous_sqrt_alpha_cumprod = None
        if self.diffusion_model.use_continuous_noise:
            continuous_sqrt_alpha_cumprod = (
                self.diffusion_model.sample_continuous_noise_level(x.shape[0], t + 1)
            )
            # todo: make sure t+1 is correct here
        return self.diffusion_model.q_sample(
            x_start=x,
            t=t,
            noise=noise,
            continuous_sqrt_alpha_cumprod=continuous_sqrt_alpha_cumprod,
        )
def forward(self, x_noisy, t, *args, **kwargs):
return self.model(x_noisy, t)
@torch.no_grad()
def get_input(self, batch, k):
x = batch[k]
if len(x.shape) == 3:
x = x[..., None]
x = rearrange(x, "b h w c -> b c h w")
x = x.to(memory_format=torch.contiguous_format).float()
return x
    @torch.no_grad()
    def get_conditioning(self, batch, k=None):
        """Fetch classification targets from the batch (defaults to self.label_key).

        Segmentation targets are moved to channels-first and halved ``self.numd``
        times so they match the first-stage latent resolution.
        """
        if k is None:
            k = self.label_key
        assert k is not None, "Needs to provide label key"
        targets = batch[k].to(self.device)
        if self.label_key == "segmentation":
            targets = rearrange(targets, "b h w c -> b c h w")
            for down in range(self.numd):
                h, w = targets.shape[-2:]
                # Nearest-neighbour keeps hard class labels intact while downsampling.
                targets = F.interpolate(targets, size=(h // 2, w // 2), mode="nearest")
            # targets = rearrange(targets,'b c h w -> b h w c')
        return targets
def compute_top_k(self, logits, labels, k, reduction="mean"):
_, top_ks = torch.topk(logits, k, dim=1)
if reduction == "mean":
return (top_ks == labels[:, None]).float().sum(dim=-1).mean().item()
elif reduction == "none":
return (top_ks == labels[:, None]).float().sum(dim=-1)
    def on_train_epoch_start(self):
        # save some memory
        # NOTE(review): this parks the diffusion model's inner network on CPU for
        # the epoch; presumably it is not needed on GPU while the classifier
        # trains — confirm q_sample/get_input do not touch `diffusion_model.model`.
        self.diffusion_model.model.to("cpu")
    @torch.no_grad()
    def write_logs(self, loss, logits, targets):
        """Log loss, top-1/top-5 accuracy, global step and current learning rate.

        Metric names are prefixed with "train"/"val" based on ``self.training``.
        """
        log_prefix = "train" if self.training else "val"
        log = {}
        log[f"{log_prefix}/loss"] = loss.mean()
        log[f"{log_prefix}/acc@1"] = self.compute_top_k(
            logits, targets, k=1, reduction="mean"
        )
        log[f"{log_prefix}/acc@5"] = self.compute_top_k(
            logits, targets, k=5, reduction="mean"
        )
        # Step-wise logging only while training; epoch aggregation always on.
        self.log_dict(
            log, prog_bar=False, logger=True, on_step=self.training, on_epoch=True
        )
        # Progress-bar-only duplicates (not sent to the logger).
        self.log("loss", log[f"{log_prefix}/loss"], prog_bar=True, logger=False)
        self.log(
            "global_step", self.global_step, logger=False, on_epoch=False, prog_bar=True
        )
        lr = self.optimizers().param_groups[0]["lr"]
        self.log("lr_abs", lr, on_step=True, logger=True, on_epoch=False, prog_bar=True)
    def shared_step(self, batch, t=None):
        """Noise the input, classify it, and compute the cross-entropy loss.

        Args:
            batch: raw batch consumed by the diffusion model's ``get_input``.
            t: optional fixed diffusion timestep; when None a random timestep
               is drawn per sample.

        Returns:
            (loss, logits, x_noisy, targets): scalar mean loss, classifier
            logits, the noised latents, and the integer targets.
        """
        # Encode the batch through the frozen diffusion model's input pipeline.
        x, *_ = self.diffusion_model.get_input(
            batch, k=self.diffusion_model.first_stage_key
        )
        targets = self.get_conditioning(batch)
        if targets.dim() == 4:
            # Per-pixel class maps -> integer class indices.
            targets = targets.argmax(dim=1)
        if t is None:
            # Random timestep per sample (normal training path).
            t = torch.randint(
                0, self.diffusion_model.num_timesteps, (x.shape[0],), device=self.device
            ).long()
        else:
            # Fixed timestep (used by validation to probe specific noise levels).
            t = torch.full(size=(x.shape[0],), fill_value=t, device=self.device).long()
        x_noisy = self.get_x_noisy(x, t)
        logits = self(x_noisy, t)
        loss = F.cross_entropy(logits, targets, reduction="none")
        # Detach before logging so logging never contributes gradients.
        self.write_logs(loss.detach(), logits.detach(), targets.detach())
        loss = loss.mean()
        return loss, logits, x_noisy, targets
def training_step(self, batch, batch_idx):
loss, *_ = self.shared_step(batch)
return loss
def reset_noise_accs(self):
self.noisy_acc = {
t: {"acc@1": [], "acc@5": []}
for t in range(
0, self.diffusion_model.num_timesteps, self.diffusion_model.log_every_t
)
}
    def on_validation_start(self):
        # Start every validation run with fresh per-timestep accuracy buckets.
        self.reset_noise_accs()
    @torch.no_grad()
    def validation_step(self, batch, batch_idx):
        """Validate at a random timestep, then sweep all logged noise levels.

        For every timestep in ``self.noisy_acc`` the batch is re-evaluated at
        that fixed noise level and top-1/top-5 accuracies are accumulated.
        """
        loss, *_ = self.shared_step(batch)
        for t in self.noisy_acc:
            _, logits, _, targets = self.shared_step(batch, t)
            self.noisy_acc[t]["acc@1"].append(
                self.compute_top_k(logits, targets, k=1, reduction="mean")
            )
            self.noisy_acc[t]["acc@5"].append(
                self.compute_top_k(logits, targets, k=5, reduction="mean")
            )
        return loss
def configure_optimizers(self):
optimizer = AdamW(
self.model.parameters(),
lr=self.learning_rate,
weight_decay=self.weight_decay,
)
if self.use_scheduler:
scheduler = instantiate_from_config(self.scheduler_config)
print("Setting up LambdaLR scheduler...")
scheduler = [
{
"scheduler": LambdaLR(optimizer, lr_lambda=scheduler.schedule),
"interval": "step",
"frequency": 1,
}
]
return [optimizer], scheduler
return optimizer
@torch.no_grad()
def log_images(self, batch, N=8, *args, **kwargs):
log = dict()
x = self.get_input(batch, self.diffusion_model.first_stage_key)
log["inputs"] = x
y = self.get_conditioning(batch)
if self.label_key == "class_label":
y = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"])
log["labels"] = y
| if ismap(y): | 4 | 2023-10-23 07:40:20+00:00 | 12k |
YORG-AI/Open-Assistant | package/src/yorgassistant/core/assistant/async_threads.py | [
{
"identifier": "Assistants",
"path": "package/src/yorgassistant/core/assistant/assistant.py",
"snippet": "class Assistants():\n def __init__(self, config,yaml_path:Optional[str] = None):\n self.config = config\n YamlPathConfig.assistants_yaml_path = yaml_path if yaml_path else 'assista... | import uuid
import time
import yaml
import os
import re
import logging
import json
import inspect
from typing import Any, List, Optional,Dict
from .assistant import Assistants
from ..nodes.openai.openai import OpenAINode,AsyncOpenAINode
from ..nodes.openai.openai_model import *
from .tools.tools import Tools, Tool
from .config import *
from .prompt.few_shot_cot_tools_choose_prompt import *
from .prompt.parameters_generate_prompt import *
from .prompt.response_generate_prompt import * | 8,047 | config = ThreadsConfig.from_dict(d)
return cls(config, YamlPathConfig.threads_yaml_path) # 使用传入的 yaml_path 创建 实例
# 如果没有找到,就抛出一个异常
raise ValueError(f'No threads with id {id} found in YAML file.')
@staticmethod
def get_all_threads() -> List[Dict[str, Any]]:
"""
读取 YAML 文件并返回所有 threads 的信息列表。
"""
# 确保 YAML 文件路径已经被设置
if YamlPathConfig.threads_yaml_path:
if not os.path.isfile(YamlPathConfig.threads_yaml_path):
# 如果文件路径存在但文件不存在,则创建一个空文件
with open(YamlPathConfig.threads_yaml_path, 'w') as file:
yaml.dump([], file)
else:
raise FileNotFoundError("The threads YAML file path is not set.")
# 读取 YAML 文件
with open(YamlPathConfig.threads_yaml_path, 'r') as file:
data = yaml.safe_load(file) or []
# 使用 from_dict 方法将每个字典转换为 ThreadsConfig 实例
threads_list = []
for item in data:
config = ThreadsConfig.from_dict(item)
threads_list.append(config)
return threads_list
async def run(self, assistant_id: str, input_text: str, **kwargs):
try:
# 使用 from_id 方法获取助手
assistant = Assistants.from_id(assistant_id)
tools_list = assistant.get_tools_type_list()
# 初始化 Tools 对象
tools = Tools()
# 获取 tools 的 summary
tools_summary = tools.get_tools_list_summary(tools_list)
# 如果第一次执行或当前的 tool 已执行完毕
if self.current_tool is None or self.current_tool.has_done():
# 使用 LLM 选择 tools
chosen_tools =await self._choose_tools(tools_summary, input_text)
# TODO: 支持多个 tool 执行
if len(chosen_tools) == 0:
logging.warn("No tool is recommended.")
self.current_tool = None
# 不使用 Tool, 直接 chat
res_message = self._chat(input_text, assistant)
else:
tool_name = chosen_tools[0]
# 获取对应的 tool 对象
self.current_tool = tools.get_tool(tool_name)
# 判断当前 tool 的执行是否需要 llm 生成参数
if self.current_tool is not None and self.current_tool.need_llm_generate_parameters():
# 使用 LLM 生成参数
parameters = self._generate_parameters(self.current_tool, input_text)
else:
parameters = kwargs
parameters['input_text'] = input_text
# 执行 tool
if self.current_tool is not None:
res_message = self.current_tool.call(**parameters)
# 根据执行结果,交给 LLM 进行包装
if self.current_tool is not None and self.current_tool.need_llm_generate_response():
# 使用 LLM 生成 response
res_message = self._generate_response(
self.current_tool, input_text, parameters, res_message, assistant
)
# 更新消息历史并保存到 YAML 文件
if isinstance(res_message, dict) and 'assistant' in res_message:
assistant_message_str = res_message['assistant']['message']
if res_message['type'] == 'success':
self._config.message_history.append(
[
{'user':input_text},
{'assistant':assistant_message_str},
]
)
self._config.assistant_id = assistant_id
await self.save_to_yaml()
res_message['content']['tool'] = self.current_tool.config.name
return res_message
else:
assistant_message_str = str(res_message)
self._config.message_history.append(
[
{'user':input_text},
{'assistant':assistant_message_str},
]
)
self._config.assistant_id = assistant_id
await self.save_to_yaml()
return {
'type': 'success',
'content': {'tool': self.current_tool.config.name},
'next_stages_info': {},
'assistant': {'message': assistant_message_str}
}
except Exception as e:
# 异常时的返回格式
logging.error(f"An error occurred: {e}")
return {
'type': 'error',
'content': {'message': str(e)},
'next_stages_info': {},
'assistant': {'message': ''}
}
async def _chat(
self, prompt: str, assistant: Assistants, system_message: Optional[str] = None
) -> str:
# 创建一个 OpenAINode 对象
|
def extract_bracket_content(s: str) -> list:
    """Extract the comma-separated items of every ``[...]`` group in *s*.

    Single quotes are stripped from the items (the groups typically come from an
    LLM echoing a python-style list such as ``['tool_a', 'tool_b']``).

    Fixes over the previous version: items are stripped of surrounding
    whitespace (``"['a', 'b']"`` used to yield ``" b"``), and empty items
    produced by stray commas are dropped.
    """
    groups = re.findall(r"\[(.*?)\]", s)
    groups = [g.replace("'", "") for g in groups]
    ret = []
    for group in groups:
        for item in group.split(","):
            item = item.strip()
            if item:
                ret.append(item)
    return ret
class AsyncThreads:
current_tool: Tool
chat_node: OpenAINode # Threads 全局的 OpenAI node,仅用于 chat 交互以及对 tool 执行结果的分析(选择 tool 以及生成参数不使用该 node)
def __init__(self, config: ThreadsConfig,threads_yaml_path:Optional[str] = None):
self._config = config
self.current_tool = None
YamlPathConfig.threads_yaml_path = threads_yaml_path if threads_yaml_path else "threads.yaml"
    @property
    def config(self):
        # Read-only access to the underlying ThreadsConfig.
        return self._config
    @property
    def id(self):
        # Convenience accessor for the thread's unique identifier.
        return self._config.id
def set_threads_yaml_path(yaml_path:str):
# 检查 yaml_path 是否为绝对路径
if not os.path.isabs(yaml_path):
# 获取调用此方法的栈帧
stack = inspect.stack()
caller_frame = stack[1]
# 获取调用者的文件路径
caller_path = caller_frame.filename
# 获取调用者的目录路径
caller_dir = os.path.dirname(caller_path)
# 构建 yaml 文件的绝对路径
full_yaml_path = os.path.join(caller_dir, yaml_path)
else:
full_yaml_path = yaml_path
# 获取 yaml 文件所在的目录
yaml_dir = os.path.dirname(full_yaml_path)
# 如果目录不存在,则创建它
os.makedirs(yaml_dir, exist_ok=True)
# 设置 yaml_path
YamlPathConfig.threads_yaml_path = full_yaml_path
    async def save_to_yaml(self):
        """Insert or update this thread's record in the threads YAML file."""
        # Absolute path of the threads YAML file.
        threads_yaml_path = YamlPathConfig.threads_yaml_path
        # Create an empty YAML file if it does not exist yet.
        if not os.path.exists(threads_yaml_path):
            with open(threads_yaml_path, 'w') as file:
                file.write('')  # create an empty file
        # Load the current records (an empty file loads as None, hence `or []`).
        with open(threads_yaml_path, "r") as file:
            data = yaml.safe_load(file) or []
        # Look for a record with the same id.
        for i, d in enumerate(data):
            if d["id"] == self.config.id:
                # Found: update the record in place.
                data[i] = self.config.to_dict()
                break
        else:
            # Not found: append this thread as a new record.
            data.append(self.config.to_dict())
        # Write everything back to the YAML file.
        with open(threads_yaml_path, "w") as file:
            yaml.dump(data, file)
@staticmethod
def create(yaml_file_path:str) -> "AsyncThreads":
# 创建 ThreadsConfig 对象
config = ThreadsConfig(
id=str(uuid.uuid4()),
object="AsyncThreads",
created_at=int(time.time()),
message_history=[],
metadata={},
)
# 创建 Threads 对象
threads = AsyncThreads(config,YamlPathConfig.threads_yaml_path)
# 保存到 YAML 文件
threads.save_to_yaml()
return threads
@classmethod
def from_id(cls, id: str) -> 'AsyncThreads':
# 使用传入的 yaml_path 参数打开 YAML 文件
with open(YamlPathConfig.threads_yaml_path, 'r') as file:
data = yaml.safe_load(file) or []
# 查找具有相同 id 的配置
for d in data:
if d['id'] == id:
# 如果找到了,就用这个配置创建一个新的对象
config = ThreadsConfig.from_dict(d)
return cls(config, YamlPathConfig.threads_yaml_path) # 使用传入的 yaml_path 创建 实例
# 如果没有找到,就抛出一个异常
raise ValueError(f'No threads with id {id} found in YAML file.')
@staticmethod
def get_all_threads() -> List[Dict[str, Any]]:
"""
读取 YAML 文件并返回所有 threads 的信息列表。
"""
# 确保 YAML 文件路径已经被设置
if YamlPathConfig.threads_yaml_path:
if not os.path.isfile(YamlPathConfig.threads_yaml_path):
# 如果文件路径存在但文件不存在,则创建一个空文件
with open(YamlPathConfig.threads_yaml_path, 'w') as file:
yaml.dump([], file)
else:
raise FileNotFoundError("The threads YAML file path is not set.")
# 读取 YAML 文件
with open(YamlPathConfig.threads_yaml_path, 'r') as file:
data = yaml.safe_load(file) or []
# 使用 from_dict 方法将每个字典转换为 ThreadsConfig 实例
threads_list = []
for item in data:
config = ThreadsConfig.from_dict(item)
threads_list.append(config)
return threads_list
async def run(self, assistant_id: str, input_text: str, **kwargs):
try:
# 使用 from_id 方法获取助手
assistant = Assistants.from_id(assistant_id)
tools_list = assistant.get_tools_type_list()
# 初始化 Tools 对象
tools = Tools()
# 获取 tools 的 summary
tools_summary = tools.get_tools_list_summary(tools_list)
# 如果第一次执行或当前的 tool 已执行完毕
if self.current_tool is None or self.current_tool.has_done():
# 使用 LLM 选择 tools
chosen_tools =await self._choose_tools(tools_summary, input_text)
# TODO: 支持多个 tool 执行
if len(chosen_tools) == 0:
logging.warn("No tool is recommended.")
self.current_tool = None
# 不使用 Tool, 直接 chat
res_message = self._chat(input_text, assistant)
else:
tool_name = chosen_tools[0]
# 获取对应的 tool 对象
self.current_tool = tools.get_tool(tool_name)
# 判断当前 tool 的执行是否需要 llm 生成参数
if self.current_tool is not None and self.current_tool.need_llm_generate_parameters():
# 使用 LLM 生成参数
parameters = self._generate_parameters(self.current_tool, input_text)
else:
parameters = kwargs
parameters['input_text'] = input_text
# 执行 tool
if self.current_tool is not None:
res_message = self.current_tool.call(**parameters)
# 根据执行结果,交给 LLM 进行包装
if self.current_tool is not None and self.current_tool.need_llm_generate_response():
# 使用 LLM 生成 response
res_message = self._generate_response(
self.current_tool, input_text, parameters, res_message, assistant
)
# 更新消息历史并保存到 YAML 文件
if isinstance(res_message, dict) and 'assistant' in res_message:
assistant_message_str = res_message['assistant']['message']
if res_message['type'] == 'success':
self._config.message_history.append(
[
{'user':input_text},
{'assistant':assistant_message_str},
]
)
self._config.assistant_id = assistant_id
await self.save_to_yaml()
res_message['content']['tool'] = self.current_tool.config.name
return res_message
else:
assistant_message_str = str(res_message)
self._config.message_history.append(
[
{'user':input_text},
{'assistant':assistant_message_str},
]
)
self._config.assistant_id = assistant_id
await self.save_to_yaml()
return {
'type': 'success',
'content': {'tool': self.current_tool.config.name},
'next_stages_info': {},
'assistant': {'message': assistant_message_str}
}
except Exception as e:
# 异常时的返回格式
logging.error(f"An error occurred: {e}")
return {
'type': 'error',
'content': {'message': str(e)},
'next_stages_info': {},
'assistant': {'message': ''}
}
async def _chat(
self, prompt: str, assistant: Assistants, system_message: Optional[str] = None
) -> str:
# 创建一个 OpenAINode 对象 | response_node = AsyncOpenAINode() | 2 | 2023-10-24 15:15:48+00:00 | 12k |
zju3dv/4K4D | scripts/ray_tracing/shading_ball.py | [
{
"identifier": "dotdict",
"path": "easyvolcap/utils/base_utils.py",
"snippet": "class dotdict(dict, Dict[KT, VT]):\n \"\"\"\n This is the default data passing object used throughout the codebase\n Main function: dot access for dict values & dict like merging and updates\n\n a dictionary tha... | import math
import torch
import argparse
import sys
from tqdm import tqdm
from glob import glob
from easyvolcap.utils.base_utils import dotdict
from easyvolcap.utils.console_utils import log, colored
from easyvolcap.utils.relight_utils import sample_envmap_image, read_hdr, Microfacet, linear2srgb, gen_light_xyz, gen_uniform_light_xyz
from easyvolcap.utils.net_utils import normalize, multi_gather, multi_scatter
from easyvolcap.utils.sh_utils import spher2cart, spherical_uniform_sampling_upper, spherical_uniform_sampling
from easyvolcap.utils.data_utils import save_image | 9,276 | zy = torch.stack(torch.meshgrid(torch.arange(H, device=args.device), torch.arange(W, device=args.device), indexing='ij'), dim=-1)
# (continued) Map pixel coordinates to points on a unit sphere ("shading ball");
# zy holds image-plane coordinates at this point, centered and normalized below.
zy = zy - torch.tensor([H / 2, W / 2], device=zy.device)
zy = zy / torch.tensor(min(H, W) / 2, device=zy.device)
zy = zy.flip(dims=[0])
zy = zy.view(-1, 2)  # H * W, 2
# Sphere depth: real only where zy lies inside the unit disk (NaN elsewhere).
x = (1 - zy.pow(2).sum(-1)).sqrt()
x = x.nan_to_num(0)
zyx = torch.cat([zy, x[..., None]], dim=-1)
surf = zyx.flip(dims=[-1])  # H * W, 3
# Construct normal and material for the shading ball
C = torch.tensor([2, 0, 0], device=x.device)  # simple perspective projection
norm = normalize(surf)  # whatever for invalid regions
view = normalize(surf - C)  # camera view direction
# Prepare mask for valid pixels
msk = (norm * view).sum(-1) < 0  # view direction and normal should be opposite
ind = msk.nonzero()
P = ind.shape[0]
surf = multi_gather(surf, ind)  # get good pixels to shade on
view = multi_gather(view, ind)  # get good pixels to shade on
norm = multi_gather(norm, ind)  # get good pixels to shade on
def image_based_lighting(surf: torch.Tensor,
                         norm: torch.Tensor,
                         view: torch.Tensor,
                         probe: dotdict,  # lighting
                         albedo: float,  # scalar albedo
                         roughness: float,  # scalar roughness
                         microfacet: Microfacet,  # material
                         N: int = 1024,  # number of samples
                         H: int = 16,
                         W: int = 32,
                         uniform: bool = True,  # uniform or stratified sampling
                         perturb: bool = True,
                         ):
    """Monte-Carlo image-based lighting of P shaded surface points.

    Accumulates light * cos(theta) * BRDF over sampled directions, either with
    N uniform random upper-hemisphere directions rotated into each normal's
    frame, or with an H x W environment-light grid (optionally randomly
    rotated when ``perturb`` is set; ``N`` is then reset to H * W).

    Args:
        surf/norm/view: (P, 3) surface points, normals, and view directions.
        probe: environment map image sampled via ``sample_envmap_image``.
        albedo, roughness: scalar material parameters fed to ``microfacet``.

    Returns:
        (P, 3) shading, already normalized by the sampling measure.
    """
    # Generate sample_count uniformly and stratified samples over the sphere
    P = surf.shape[0]
    if uniform:  # uniform random sampling
        T = P * N
        theta, phi = spherical_uniform_sampling_upper(T, device=surf.device)  # T, T,
        ray_d = spher2cart(theta, phi)  # T, 3, z always bigger than zero
    else:  # stratified sampling
        N = H * W
        T = P * N
        xyz, area = gen_light_xyz(H, W, device=surf.device)
        if perturb:
            # Random rotation of the light grid de-correlates repeated passes.
            R = torch.rand(3, 3, device=xyz.device)
            Q, R = torch.linalg.qr(R)  # 3, 3
            xyz = xyz @ Q  # apply random rotation
        xyz, area = xyz.view(-1, 3), area.view(-1, 1)
        ray_d = normalize(xyz)  # T, 3
        # Adding more samples seems to help, but not very good for low roughness surface (i.e. this implementation has a hard upper limit for specular surfaces)
        # And the visibility's influence is not clear enough, you've only done ablation on one of the char in one of the novel lighting
        # The physical correctness of distance field soft shadow is quesitonable
        # __import__('ipdb').set_trace()
        # torch.testing.assert_allclose(probe.view(-1, 3), sample_envmap_image(probe, ray_d))
        xyz = xyz[:, None].expand(N, P, 3).reshape(-1, 3)  # T, 3
        area = area[:, None].expand(N, P, 1).reshape(-1, 1)  # T, 1
        ray_d = ray_d[:, None].expand(N, P, 3).reshape(-1, 3)  # T, 3
    # Preparing shapes
    norm = norm[None].expand(N, P, 3).reshape(T, 3)  # T, 3
    view = view[None].expand(N, P, 3).reshape(T, 3)  # T, 3
    # Transform ray_d to be pointing upward from normal direction
    if uniform:
        # Build a per-sample c2w frame whose z axis is the surface normal.
        R = torch.zeros([T, 3, 3], device=norm.device)
        R[..., 0, 0] = 1.0
        R[..., :3, 2] = norm  # c2w, z axis is normal direction
        R[..., :3, 1] = normalize(torch.cross(R[..., :3, 2], R[..., :3, 0]))
        R[..., :3, 0] = normalize(torch.cross(R[..., :3, 1], R[..., :3, 2]))
        ray_d = (R @ ray_d[..., None])[..., 0]
    # Compute shading
    ldot = (ray_d * norm).sum(dim=-1, keepdim=True)  # T
    light = sample_envmap_image(probe, ray_d)
    brdf = microfacet(ray_d, -view, norm, albedo, roughness)
    shading = light * ldot * brdf
    # Apply area to normalize integration
    if uniform:
        shading = shading * 2.0 * torch.pi / N
    else:
        shading = shading * area
    shading = shading.view(N, P, -1).sum(dim=-3)
    return shading
microfacet = Microfacet(f0=fresnel)
if not args.stratified:
    # Uniform sampling: accumulate the estimator in chunks of C samples.
    C = 2048
    rgb: torch.Tensor = 0
    for i in tqdm(range(0, N, C)):
        CC = min(N, i + C) - i  # the remaining chunk size (C or smaller)
        shading = image_based_lighting(surf, norm, view,
                                       probe, albedo, roughness, microfacet, CC)
        rgb = (rgb + shading)  # undo normalization and sum
    # NOTE(review): CC here is the size of the *last* chunk, so this reweighting
    # is exact only when N is divisible by C — confirm intended behaviour.
    rgb = rgb * CC / N  # renormalization
else:
    # Stratified sampling: run the env_h x env_w light grid N times (perturbed
    # with a random rotation whenever more than one pass is made).
    N = math.ceil(N / (args.env_h * args.env_w))
    rgb: torch.Tensor = 0
    for i in tqdm(range(0, N, 1)):
        shading = image_based_lighting(surf, norm, view,
                                       probe, albedo, roughness, microfacet,
                                       H=args.env_h, W=args.env_w, uniform=False, perturb=N > 1)
        rgb = (rgb + shading)  # undo normalization and sum
    rgb = rgb * 1 / N  # renormalization
# Save rendered images
# Scatter shaded pixels back into a full H x W canvas (invalid pixels stay black).
img = torch.zeros(H * W, 3, device=rgb.device)
img = multi_scatter(img, ind, rgb).view(H, W, 3)
| # this file is used for testing materials, thus should support large images files (8k maybe?)
# uniformly sample rays, no need for ray tracing since the ball should not be concerned with multi bounce shading
# correct techniques for static objects (articulated objects) including:
# 1. PRT (pre-computed radiance transfer), converts to a bunch of matrix dot product using spherical harmonics
# 2. Split-Sum for prefiltering the environment map and lighting, along with decomposed material node
# 3. Brute-Force shading with random sampling (this is what we're implementing) (used for ray tracing)
# fmt: off
sys.path.append('.')
# fmt: on
# Command-line configuration of resolution, material, lighting and sampling.
parser = argparse.ArgumentParser()
parser.add_argument('--device', type=str, default='cuda')
parser.add_argument('--height', type=int, default=256)
parser.add_argument('--width', type=int, default=256)
parser.add_argument('--sample', type=int, default=10000, help='number of ray samples for the shaing results')
parser.add_argument('--fresnel', type=float, default=0.04)
parser.add_argument('--albedo', type=float, default=0.0)  # maybe a file for albedo map?
parser.add_argument('--roughness', type=float, default=0.3)  # maybe a file for roughness map?
parser.add_argument('--probe', type=str, default='data/lighting/8k/gym_entrance.hdr')
parser.add_argument('--output', type=str, default='data/shading_ball.png')
parser.add_argument('--stratified', action='store_true')
parser.add_argument('--env_h', type=int, default=16)
parser.add_argument('--env_w', type=int, default=32)
args = parser.parse_args()
# Prepare shapes
albedo, roughness, fresnel = args.albedo, args.roughness, args.fresnel
H, W, N = args.height, args.width, args.sample
log(f'Will produce a {colored(f"{H}, {W}", "magenta")} shading ball with {colored(f"{N}", "magenta")} samples for each pixel, albedo: {colored(str(albedo), "magenta")}, roughness: {colored(str(roughness), "magenta")}, fresnel: {colored(str(fresnel), "magenta")}')
# Loading environment map for shading computation
log(f'Loading environment map from {colored(args.probe, "blue")} onto {colored(args.device, "magenta")}')
probe = torch.from_numpy(read_hdr(args.probe)).to(args.device, non_blocking=True)
# Construct the coordinate of the shading ball
zy = torch.stack(torch.meshgrid(torch.arange(H, device=args.device), torch.arange(W, device=args.device), indexing='ij'), dim=-1)
zy = zy - torch.tensor([H / 2, W / 2], device=zy.device)
zy = zy / torch.tensor(min(H, W) / 2, device=zy.device)
zy = zy.flip(dims=[0])
zy = zy.view(-1, 2)  # H * W, 2
# Sphere depth: real only inside the unit disk (NaN elsewhere, zeroed below).
x = (1 - zy.pow(2).sum(-1)).sqrt()
x = x.nan_to_num(0)
zyx = torch.cat([zy, x[..., None]], dim=-1)
surf = zyx.flip(dims=[-1])  # H * W, 3
# Construct normal and material for the shading ball
C = torch.tensor([2, 0, 0], device=x.device)  # simple perspective projection
norm = normalize(surf)  # whatever for invalid regions
view = normalize(surf - C)  # camera view direction
# Prepare mask for valid pixels
msk = (norm * view).sum(-1) < 0  # view direction and normal should be opposite
ind = msk.nonzero()
P = ind.shape[0]
surf = multi_gather(surf, ind)  # get good pixels to shade on
view = multi_gather(view, ind)  # get good pixels to shade on
norm = multi_gather(norm, ind)  # get good pixels to shade on
def image_based_lighting(surf: torch.Tensor,
norm: torch.Tensor,
view: torch.Tensor,
probe: dotdict, # lighting
albedo: float, # scalar albedo
roughness: float, # scalar roughness
microfacet: Microfacet, # material
N: int = 1024, # number of samples
H: int = 16,
W: int = 32,
uniform: bool = True, # uniform or stratified sampling
perturb: bool = True,
):
# Generate sample_count uniformly and stratified samples over the sphere
P = surf.shape[0]
if uniform: # uniform random sampling
T = P * N
theta, phi = spherical_uniform_sampling_upper(T, device=surf.device) # T, T,
ray_d = spher2cart(theta, phi) # T, 3, z always bigger than zero
else: # stratified sampling
N = H * W
T = P * N
xyz, area = gen_light_xyz(H, W, device=surf.device)
if perturb:
R = torch.rand(3, 3, device=xyz.device)
Q, R = torch.linalg.qr(R) # 3, 3
xyz = xyz @ Q # apply random rotation
xyz, area = xyz.view(-1, 3), area.view(-1, 1)
ray_d = normalize(xyz) # T, 3
# Adding more samples seems to help, but not very good for low roughness surface (i.e. this implementation has a hard upper limit for specular surfaces)
# And the visibility's influence is not clear enough, you've only done ablation on one of the char in one of the novel lighting
# The physical correctness of distance field soft shadow is quesitonable
# __import__('ipdb').set_trace()
# torch.testing.assert_allclose(probe.view(-1, 3), sample_envmap_image(probe, ray_d))
xyz = xyz[:, None].expand(N, P, 3).reshape(-1, 3) # T, 3
area = area[:, None].expand(N, P, 1).reshape(-1, 1) # T, 1
ray_d = ray_d[:, None].expand(N, P, 3).reshape(-1, 3) # T, 3
# Preparing shapes
norm = norm[None].expand(N, P, 3).reshape(T, 3) # T, 3
view = view[None].expand(N, P, 3).reshape(T, 3) # T, 3
# Transform ray_d to be pointing upward from normal direction
if uniform:
R = torch.zeros([T, 3, 3], device=norm.device)
R[..., 0, 0] = 1.0
R[..., :3, 2] = norm # c2w, z axis is normal direction
R[..., :3, 1] = normalize(torch.cross(R[..., :3, 2], R[..., :3, 0]))
R[..., :3, 0] = normalize(torch.cross(R[..., :3, 1], R[..., :3, 2]))
ray_d = (R @ ray_d[..., None])[..., 0]
# Compute shading
ldot = (ray_d * norm).sum(dim=-1, keepdim=True) # T
light = sample_envmap_image(probe, ray_d)
brdf = microfacet(ray_d, -view, norm, albedo, roughness)
shading = light * ldot * brdf
# Apply area to normalize integration
if uniform:
shading = shading * 2.0 * torch.pi / N
else:
shading = shading * area
shading = shading.view(N, P, -1).sum(dim=-3)
return shading
microfacet = Microfacet(f0=fresnel)
if not args.stratified:
C = 2048
rgb: torch.Tensor = 0
for i in tqdm(range(0, N, C)):
CC = min(N, i + C) - i # the remaining chunk size (C or smaller)
shading = image_based_lighting(surf, norm, view,
probe, albedo, roughness, microfacet, CC)
rgb = (rgb + shading) # undo normalization and sum
rgb = rgb * CC / N # renormalization
else:
N = math.ceil(N / (args.env_h * args.env_w))
rgb: torch.Tensor = 0
for i in tqdm(range(0, N, 1)):
shading = image_based_lighting(surf, norm, view,
probe, albedo, roughness, microfacet,
H=args.env_h, W=args.env_w, uniform=False, perturb=N > 1)
rgb = (rgb + shading) # undo normalization and sum
rgb = rgb * 1 / N # renormalization
# Save rendered images
img = torch.zeros(H * W, 3, device=rgb.device)
img = multi_scatter(img, ind, rgb).view(H, W, 3) | img = linear2srgb(img) | 5 | 2023-10-17 04:48:46+00:00 | 12k |
codefuse-ai/Test-Agent | chat/server/gradio_web_server_multi.py | [
{
"identifier": "SESSION_EXPIRATION_TIME",
"path": "chat/constants.py",
"snippet": "SESSION_EXPIRATION_TIME = 3600"
},
{
"identifier": "build_side_by_side_ui_anony",
"path": "chat/server/gradio_block_arena_anony.py",
"snippet": "def build_side_by_side_ui_anony(models):\n notice_markdo... | import argparse
import pickle
import time
import gradio as gr
from chat.constants import (
SESSION_EXPIRATION_TIME,
)
from chat.server.gradio_block_arena_anony import (
build_side_by_side_ui_anony,
load_demo_side_by_side_anony,
set_global_vars_anony,
)
from chat.server.gradio_block_arena_named import (
build_side_by_side_ui_named,
load_demo_side_by_side_named,
set_global_vars_named,
)
from chat.server.gradio_web_server import (
set_global_vars,
block_css,
build_single_model_ui,
get_model_list,
load_demo_single,
ip_expiration_dict,
)
from chat.server.monitor.monitor import build_leaderboard_tab
from chat.utils import (
build_logger,
get_window_url_params_js,
parse_gradio_auth_creds,
) | 7,348 | c_list = (
c_states
+ c_model_selectors
+ c_chatbots
+ [
c_textbox,
c_send_btn,
c_button_row,
c_button_row2,
c_parameter_row,
]
)
with gr.Tab("Single Model", id=2):
(
a_state,
a_model_selector,
a_chatbot,
a_textbox,
a_send_btn,
a_button_row,
a_parameter_row,
) = build_single_model_ui(models, add_promotion_links=True)
a_list = [
a_state,
a_model_selector,
a_chatbot,
a_textbox,
a_send_btn,
a_button_row,
a_parameter_row,
]
if elo_results_file:
with gr.Tab("Leaderboard", id=3):
build_leaderboard_tab(elo_results_file, leaderboard_table_file)
url_params = gr.JSON(visible=False)
if args.model_list_mode not in ["once", "reload"]:
raise ValueError(f"Unknown model list mode: {args.model_list_mode}")
demo.load(
load_demo,
[url_params],
[tabs] + a_list + b_list + c_list,
_js=get_window_url_params_js,
)
return demo
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--host", type=str, default="0.0.0.0")
parser.add_argument("--port", type=int)
parser.add_argument(
"--share",
action="store_true",
help="Whether to generate a public, shareable link.",
)
parser.add_argument(
"--controller-url",
type=str,
default="http://localhost:21001",
help="The address of the controller.",
)
parser.add_argument(
"--concurrency-count",
type=int,
default=10,
help="The concurrency count of the gradio queue.",
)
parser.add_argument(
"--model-list-mode",
type=str,
default="once",
choices=["once", "reload"],
help="Whether to load the model list once or reload the model list every time.",
)
parser.add_argument(
"--moderate", action="store_true", help="Enable content moderation"
)
parser.add_argument(
"--add-chatgpt",
action="store_true",
help="Add OpenAI's ChatGPT models (gpt-3.5-turbo, gpt-4)",
)
parser.add_argument(
"--add-claude",
action="store_true",
help="Add Anthropic's Claude models (claude-2, claude-instant-1)",
)
parser.add_argument(
"--add-palm",
action="store_true",
help="Add Google's PaLM model (PaLM 2 for Chat: chat-bison@001)",
)
parser.add_argument(
"--anony-only-for-proprietary-model",
action="store_true",
help="Only add ChatGPT, Claude, Bard under anony battle tab",
)
parser.add_argument(
"--register-openai-compatible-models",
type=str,
help="Register custom OpenAI API compatible models by loading them from a JSON file",
)
parser.add_argument(
"--gradio-auth-path",
type=str,
help='Set the gradio authentication file path. The file should contain one or more user:password pairs in this format: "u1:p1,u2:p2,u3:p3"',
default=None,
)
parser.add_argument("--elo-results-file", type=str)
parser.add_argument("--leaderboard-table-file", type=str)
args = parser.parse_args()
logger.info(f"args: {args}")
# Set global variables
set_global_vars(args.controller_url, args.moderate)
| """
The gradio demo server with multiple tabs.
It supports chatting with a single model or chatting with two models side-by-side.
"""
logger = build_logger("gradio_web_server_multi", "gradio_web_server_multi.log")
def load_demo(url_params, request: gr.Request):
global models
ip = request.client.host
logger.info(f"load_demo. ip: {ip}. params: {url_params}")
ip_expiration_dict[ip] = time.time() + SESSION_EXPIRATION_TIME
selected = 0
if "arena" in url_params:
selected = 0
elif "compare" in url_params:
selected = 1
elif "single" in url_params:
selected = 2
elif "leaderboard" in url_params:
selected = 3
if args.model_list_mode == "reload":
if args.anony_only_for_proprietary_model:
models = get_model_list(
args.controller_url,
args.register_openai_compatible_models,
False,
False,
False,
)
else:
models = get_model_list(
args.controller_url,
args.register_openai_compatible_models,
args.add_chatgpt,
args.add_claude,
args.add_palm,
)
single_updates = load_demo_single(models, url_params)
models_anony = list(models)
if args.anony_only_for_proprietary_model:
# Only enable these models in anony battles.
if args.add_chatgpt:
models_anony += ["gpt-4", "gpt-3.5-turbo"]
if args.add_claude:
models_anony += ["claude-2", "claude-instant-1"]
if args.add_palm:
models_anony += ["palm-2"]
side_by_side_anony_updates = load_demo_side_by_side_anony(models_anony, url_params)
side_by_side_named_updates = load_demo_side_by_side_named(models, url_params)
return (
(gr.Tabs.update(selected=selected),)
+ single_updates
+ side_by_side_anony_updates
+ side_by_side_named_updates
)
def build_demo(models, elo_results_file, leaderboard_table_file):
with gr.Blocks(
title="Chat with Open Large Language Models",
theme=gr.themes.Base(),
css=block_css,
) as demo:
with gr.Tabs() as tabs:
with gr.Tab("Chatbot Arena (battle)", id=0):
(
b_states,
b_model_selectors,
b_chatbots,
b_textbox,
b_send_btn,
b_button_row,
b_button_row2,
b_parameter_row,
) = build_side_by_side_ui_anony(models)
b_list = (
b_states
+ b_model_selectors
+ b_chatbots
+ [
b_textbox,
b_send_btn,
b_button_row,
b_button_row2,
b_parameter_row,
]
)
with gr.Tab("Chatbot Arena (side-by-side)", id=1):
(
c_states,
c_model_selectors,
c_chatbots,
c_textbox,
c_send_btn,
c_button_row,
c_button_row2,
c_parameter_row,
) = build_side_by_side_ui_named(models)
c_list = (
c_states
+ c_model_selectors
+ c_chatbots
+ [
c_textbox,
c_send_btn,
c_button_row,
c_button_row2,
c_parameter_row,
]
)
with gr.Tab("Single Model", id=2):
(
a_state,
a_model_selector,
a_chatbot,
a_textbox,
a_send_btn,
a_button_row,
a_parameter_row,
) = build_single_model_ui(models, add_promotion_links=True)
a_list = [
a_state,
a_model_selector,
a_chatbot,
a_textbox,
a_send_btn,
a_button_row,
a_parameter_row,
]
if elo_results_file:
with gr.Tab("Leaderboard", id=3):
build_leaderboard_tab(elo_results_file, leaderboard_table_file)
url_params = gr.JSON(visible=False)
if args.model_list_mode not in ["once", "reload"]:
raise ValueError(f"Unknown model list mode: {args.model_list_mode}")
demo.load(
load_demo,
[url_params],
[tabs] + a_list + b_list + c_list,
_js=get_window_url_params_js,
)
return demo
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--host", type=str, default="0.0.0.0")
parser.add_argument("--port", type=int)
parser.add_argument(
"--share",
action="store_true",
help="Whether to generate a public, shareable link.",
)
parser.add_argument(
"--controller-url",
type=str,
default="http://localhost:21001",
help="The address of the controller.",
)
parser.add_argument(
"--concurrency-count",
type=int,
default=10,
help="The concurrency count of the gradio queue.",
)
parser.add_argument(
"--model-list-mode",
type=str,
default="once",
choices=["once", "reload"],
help="Whether to load the model list once or reload the model list every time.",
)
parser.add_argument(
"--moderate", action="store_true", help="Enable content moderation"
)
parser.add_argument(
"--add-chatgpt",
action="store_true",
help="Add OpenAI's ChatGPT models (gpt-3.5-turbo, gpt-4)",
)
parser.add_argument(
"--add-claude",
action="store_true",
help="Add Anthropic's Claude models (claude-2, claude-instant-1)",
)
parser.add_argument(
"--add-palm",
action="store_true",
help="Add Google's PaLM model (PaLM 2 for Chat: chat-bison@001)",
)
parser.add_argument(
"--anony-only-for-proprietary-model",
action="store_true",
help="Only add ChatGPT, Claude, Bard under anony battle tab",
)
parser.add_argument(
"--register-openai-compatible-models",
type=str,
help="Register custom OpenAI API compatible models by loading them from a JSON file",
)
parser.add_argument(
"--gradio-auth-path",
type=str,
help='Set the gradio authentication file path. The file should contain one or more user:password pairs in this format: "u1:p1,u2:p2,u3:p3"',
default=None,
)
parser.add_argument("--elo-results-file", type=str)
parser.add_argument("--leaderboard-table-file", type=str)
args = parser.parse_args()
logger.info(f"args: {args}")
# Set global variables
set_global_vars(args.controller_url, args.moderate) | set_global_vars_named(args.moderate) | 6 | 2023-10-20 08:56:20+00:00 | 12k |
thuml/iTransformer | run.py | [
{
"identifier": "Exp_Long_Term_Forecast",
"path": "experiments/exp_long_term_forecasting.py",
"snippet": "class Exp_Long_Term_Forecast(Exp_Basic):\n def __init__(self, args):\n super(Exp_Long_Term_Forecast, self).__init__(args)\n\n def _build_model(self):\n model = self.model_dict[se... | import argparse
import torch
import random
import numpy as np
from experiments.exp_long_term_forecasting import Exp_Long_Term_Forecast
from experiments.exp_long_term_forecasting_partial import Exp_Long_Term_Forecast_Partial | 9,483 |
if __name__ == '__main__':
fix_seed = 2023
random.seed(fix_seed)
torch.manual_seed(fix_seed)
np.random.seed(fix_seed)
parser = argparse.ArgumentParser(description='iTransformer')
# basic config
parser.add_argument('--is_training', type=int, required=True, default=1, help='status')
parser.add_argument('--model_id', type=str, required=True, default='test', help='model id')
parser.add_argument('--model', type=str, required=True, default='iTransformer',
help='model name, options: [iTransformer, iInformer, iReformer, iFlowformer, iFlashformer]')
# data loader
parser.add_argument('--data', type=str, required=True, default='custom', help='dataset type')
parser.add_argument('--root_path', type=str, default='./data/electricity/', help='root path of the data file')
parser.add_argument('--data_path', type=str, default='electricity.csv', help='data csv file')
parser.add_argument('--features', type=str, default='M',
help='forecasting task, options:[M, S, MS]; M:multivariate predict multivariate, S:univariate predict univariate, MS:multivariate predict univariate')
parser.add_argument('--target', type=str, default='OT', help='target feature in S or MS task')
parser.add_argument('--freq', type=str, default='h',
help='freq for time features encoding, options:[s:secondly, t:minutely, h:hourly, d:daily, b:business days, w:weekly, m:monthly], you can also use more detailed freq like 15min or 3h')
parser.add_argument('--checkpoints', type=str, default='./checkpoints/', help='location of model checkpoints')
# forecasting task
parser.add_argument('--seq_len', type=int, default=96, help='input sequence length')
parser.add_argument('--label_len', type=int, default=48, help='start token length') # no longer needed in inverted Transformers
parser.add_argument('--pred_len', type=int, default=96, help='prediction sequence length')
# model define
parser.add_argument('--enc_in', type=int, default=7, help='encoder input size')
parser.add_argument('--dec_in', type=int, default=7, help='decoder input size')
parser.add_argument('--c_out', type=int, default=7, help='output size') # applicable on arbitrary number of variates in inverted Transformers
parser.add_argument('--d_model', type=int, default=512, help='dimension of model')
parser.add_argument('--n_heads', type=int, default=8, help='num of heads')
parser.add_argument('--e_layers', type=int, default=2, help='num of encoder layers')
parser.add_argument('--d_layers', type=int, default=1, help='num of decoder layers')
parser.add_argument('--d_ff', type=int, default=2048, help='dimension of fcn')
parser.add_argument('--moving_avg', type=int, default=25, help='window size of moving average')
parser.add_argument('--factor', type=int, default=1, help='attn factor')
parser.add_argument('--distil', action='store_false',
help='whether to use distilling in encoder, using this argument means not using distilling',
default=True)
parser.add_argument('--dropout', type=float, default=0.1, help='dropout')
parser.add_argument('--embed', type=str, default='timeF',
help='time features encoding, options:[timeF, fixed, learned]')
parser.add_argument('--activation', type=str, default='gelu', help='activation')
parser.add_argument('--output_attention', action='store_true', help='whether to output attention in ecoder')
parser.add_argument('--do_predict', action='store_true', help='whether to predict unseen future data')
# optimization
parser.add_argument('--num_workers', type=int, default=10, help='data loader num workers')
parser.add_argument('--itr', type=int, default=1, help='experiments times')
parser.add_argument('--train_epochs', type=int, default=10, help='train epochs')
parser.add_argument('--batch_size', type=int, default=32, help='batch size of train input data')
parser.add_argument('--patience', type=int, default=3, help='early stopping patience')
parser.add_argument('--learning_rate', type=float, default=0.0001, help='optimizer learning rate')
parser.add_argument('--des', type=str, default='test', help='exp description')
parser.add_argument('--loss', type=str, default='MSE', help='loss function')
parser.add_argument('--lradj', type=str, default='type1', help='adjust learning rate')
parser.add_argument('--use_amp', action='store_true', help='use automatic mixed precision training', default=False)
# GPU
parser.add_argument('--use_gpu', type=bool, default=True, help='use gpu')
parser.add_argument('--gpu', type=int, default=0, help='gpu')
parser.add_argument('--use_multi_gpu', action='store_true', help='use multiple gpus', default=False)
parser.add_argument('--devices', type=str, default='0,1,2,3', help='device ids of multile gpus')
# iTransformer
parser.add_argument('--exp_name', type=str, required=False, default='MTSF',
help='experiemnt name, options:[MTSF, partial_train]')
parser.add_argument('--channel_independence', type=bool, default=False, help='whether to use channel_independence mechanism')
parser.add_argument('--inverse', action='store_true', help='inverse output data', default=False)
parser.add_argument('--class_strategy', type=str, default='projection', help='projection/average/cls_token')
parser.add_argument('--target_root_path', type=str, default='./data/electricity/', help='root path of the data file')
parser.add_argument('--target_data_path', type=str, default='electricity.csv', help='data file')
parser.add_argument('--efficient_training', type=bool, default=False, help='whether to use efficient_training (exp_name should be partial train)') # See Figure 8 of our paper for the detail
parser.add_argument('--use_norm', type=int, default=True, help='use norm and denorm')
parser.add_argument('--partial_start_index', type=int, default=0, help='the start index of variates for partial training, '
'you can select [partial_start_index, min(enc_in + partial_start_index, N)]')
args = parser.parse_args()
args.use_gpu = True if torch.cuda.is_available() and args.use_gpu else False
if args.use_gpu and args.use_multi_gpu:
args.devices = args.devices.replace(' ', '')
device_ids = args.devices.split(',')
args.device_ids = [int(id_) for id_ in device_ids]
args.gpu = args.device_ids[0]
print('Args in experiment:')
print(args)
if args.exp_name == 'partial_train': # See Figure 8 of our paper, for the detail
Exp = Exp_Long_Term_Forecast_Partial
else: # MTSF: multivariate time series forecasting
|
if __name__ == '__main__':
fix_seed = 2023
random.seed(fix_seed)
torch.manual_seed(fix_seed)
np.random.seed(fix_seed)
parser = argparse.ArgumentParser(description='iTransformer')
# basic config
parser.add_argument('--is_training', type=int, required=True, default=1, help='status')
parser.add_argument('--model_id', type=str, required=True, default='test', help='model id')
parser.add_argument('--model', type=str, required=True, default='iTransformer',
help='model name, options: [iTransformer, iInformer, iReformer, iFlowformer, iFlashformer]')
# data loader
parser.add_argument('--data', type=str, required=True, default='custom', help='dataset type')
parser.add_argument('--root_path', type=str, default='./data/electricity/', help='root path of the data file')
parser.add_argument('--data_path', type=str, default='electricity.csv', help='data csv file')
parser.add_argument('--features', type=str, default='M',
help='forecasting task, options:[M, S, MS]; M:multivariate predict multivariate, S:univariate predict univariate, MS:multivariate predict univariate')
parser.add_argument('--target', type=str, default='OT', help='target feature in S or MS task')
parser.add_argument('--freq', type=str, default='h',
help='freq for time features encoding, options:[s:secondly, t:minutely, h:hourly, d:daily, b:business days, w:weekly, m:monthly], you can also use more detailed freq like 15min or 3h')
parser.add_argument('--checkpoints', type=str, default='./checkpoints/', help='location of model checkpoints')
# forecasting task
parser.add_argument('--seq_len', type=int, default=96, help='input sequence length')
parser.add_argument('--label_len', type=int, default=48, help='start token length') # no longer needed in inverted Transformers
parser.add_argument('--pred_len', type=int, default=96, help='prediction sequence length')
# model define
parser.add_argument('--enc_in', type=int, default=7, help='encoder input size')
parser.add_argument('--dec_in', type=int, default=7, help='decoder input size')
parser.add_argument('--c_out', type=int, default=7, help='output size') # applicable on arbitrary number of variates in inverted Transformers
parser.add_argument('--d_model', type=int, default=512, help='dimension of model')
parser.add_argument('--n_heads', type=int, default=8, help='num of heads')
parser.add_argument('--e_layers', type=int, default=2, help='num of encoder layers')
parser.add_argument('--d_layers', type=int, default=1, help='num of decoder layers')
parser.add_argument('--d_ff', type=int, default=2048, help='dimension of fcn')
parser.add_argument('--moving_avg', type=int, default=25, help='window size of moving average')
parser.add_argument('--factor', type=int, default=1, help='attn factor')
parser.add_argument('--distil', action='store_false',
help='whether to use distilling in encoder, using this argument means not using distilling',
default=True)
parser.add_argument('--dropout', type=float, default=0.1, help='dropout')
parser.add_argument('--embed', type=str, default='timeF',
help='time features encoding, options:[timeF, fixed, learned]')
parser.add_argument('--activation', type=str, default='gelu', help='activation')
parser.add_argument('--output_attention', action='store_true', help='whether to output attention in ecoder')
parser.add_argument('--do_predict', action='store_true', help='whether to predict unseen future data')
# optimization
parser.add_argument('--num_workers', type=int, default=10, help='data loader num workers')
parser.add_argument('--itr', type=int, default=1, help='experiments times')
parser.add_argument('--train_epochs', type=int, default=10, help='train epochs')
parser.add_argument('--batch_size', type=int, default=32, help='batch size of train input data')
parser.add_argument('--patience', type=int, default=3, help='early stopping patience')
parser.add_argument('--learning_rate', type=float, default=0.0001, help='optimizer learning rate')
parser.add_argument('--des', type=str, default='test', help='exp description')
parser.add_argument('--loss', type=str, default='MSE', help='loss function')
parser.add_argument('--lradj', type=str, default='type1', help='adjust learning rate')
parser.add_argument('--use_amp', action='store_true', help='use automatic mixed precision training', default=False)
# GPU
parser.add_argument('--use_gpu', type=bool, default=True, help='use gpu')
parser.add_argument('--gpu', type=int, default=0, help='gpu')
parser.add_argument('--use_multi_gpu', action='store_true', help='use multiple gpus', default=False)
parser.add_argument('--devices', type=str, default='0,1,2,3', help='device ids of multile gpus')
# iTransformer
parser.add_argument('--exp_name', type=str, required=False, default='MTSF',
help='experiemnt name, options:[MTSF, partial_train]')
parser.add_argument('--channel_independence', type=bool, default=False, help='whether to use channel_independence mechanism')
parser.add_argument('--inverse', action='store_true', help='inverse output data', default=False)
parser.add_argument('--class_strategy', type=str, default='projection', help='projection/average/cls_token')
parser.add_argument('--target_root_path', type=str, default='./data/electricity/', help='root path of the data file')
parser.add_argument('--target_data_path', type=str, default='electricity.csv', help='data file')
parser.add_argument('--efficient_training', type=bool, default=False, help='whether to use efficient_training (exp_name should be partial train)') # See Figure 8 of our paper for the detail
parser.add_argument('--use_norm', type=int, default=True, help='use norm and denorm')
parser.add_argument('--partial_start_index', type=int, default=0, help='the start index of variates for partial training, '
'you can select [partial_start_index, min(enc_in + partial_start_index, N)]')
args = parser.parse_args()
args.use_gpu = True if torch.cuda.is_available() and args.use_gpu else False
if args.use_gpu and args.use_multi_gpu:
args.devices = args.devices.replace(' ', '')
device_ids = args.devices.split(',')
args.device_ids = [int(id_) for id_ in device_ids]
args.gpu = args.device_ids[0]
print('Args in experiment:')
print(args)
if args.exp_name == 'partial_train': # See Figure 8 of our paper, for the detail
Exp = Exp_Long_Term_Forecast_Partial
else: # MTSF: multivariate time series forecasting | Exp = Exp_Long_Term_Forecast | 0 | 2023-10-19 03:23:15+00:00 | 12k |
kylesargent/ZeroNVS | threestudio/models/geometry/base.py | [
{
"identifier": "IsosurfaceHelper",
"path": "threestudio/models/isosurface.py",
"snippet": "class IsosurfaceHelper(nn.Module):\n points_range: Tuple[float, float] = (0, 1)\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"N 3\"]:\n raise NotImplementedError"
},
{
"identi... | from dataclasses import dataclass, field
from threestudio.models.isosurface import (
IsosurfaceHelper,
MarchingCubeCPUHelper,
MarchingTetrahedraHelper,
)
from threestudio.models.mesh import Mesh
from threestudio.utils.base import BaseModule
from threestudio.utils.ops import chunk_batch, scale_tensor
from threestudio.utils.typing import *
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio | 7,687 |
def contract_to_unisphere(
x: Float[Tensor, "... 3"], bbox: Float[Tensor, "2 3"], unbounded: bool = False
) -> Float[Tensor, "... 3"]:
if unbounded:
# import pdb
# pdb.set_trace()
x = scale_tensor(x, bbox, (0, 1))
x = x * 2 - 1 # aabb is at [-1, 1]
mag = x.norm(dim=-1, keepdim=True)
mask = mag.squeeze(-1) > 1
x = x.clone()
x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])
x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]
else:
x = scale_tensor(x, bbox, (0, 1))
return x
class BaseGeometry(BaseModule):
@dataclass
class Config(BaseModule.Config):
pass
cfg: Config
@staticmethod
def create_from(
other: "BaseGeometry", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs
) -> "BaseGeometry":
raise TypeError(
f"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}"
)
def export(self, *args, **kwargs) -> Dict[str, Any]:
return {}
class BaseImplicitGeometry(BaseGeometry):
@dataclass
class Config(BaseGeometry.Config):
radius: float = 1.0
isosurface: bool = True
isosurface_method: str = "mt"
isosurface_resolution: int = 128
isosurface_threshold: Union[float, str] = 0.0
isosurface_chunk: int = 0
isosurface_coarse_to_fine: bool = True
isosurface_deformable_grid: bool = False
isosurface_remove_outliers: bool = True
isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01
cfg: Config
def configure(self) -> None:
self.bbox: Float[Tensor, "2 3"]
self.register_buffer(
"bbox",
torch.as_tensor(
[
[-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],
[self.cfg.radius, self.cfg.radius, self.cfg.radius],
],
dtype=torch.float32,
),
)
self.isosurface_helper: Optional[IsosurfaceHelper] = None
self.unbounded: bool = True
def _initilize_isosurface_helper(self):
if self.cfg.isosurface and self.isosurface_helper is None:
if self.cfg.isosurface_method == "mc-cpu":
self.isosurface_helper = MarchingCubeCPUHelper(
self.cfg.isosurface_resolution
).to(self.device)
elif self.cfg.isosurface_method == "mt":
|
def contract_to_unisphere(
x: Float[Tensor, "... 3"], bbox: Float[Tensor, "2 3"], unbounded: bool = False
) -> Float[Tensor, "... 3"]:
if unbounded:
# import pdb
# pdb.set_trace()
x = scale_tensor(x, bbox, (0, 1))
x = x * 2 - 1 # aabb is at [-1, 1]
mag = x.norm(dim=-1, keepdim=True)
mask = mag.squeeze(-1) > 1
x = x.clone()
x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])
x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]
else:
x = scale_tensor(x, bbox, (0, 1))
return x
class BaseGeometry(BaseModule):
@dataclass
class Config(BaseModule.Config):
pass
cfg: Config
@staticmethod
def create_from(
other: "BaseGeometry", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs
) -> "BaseGeometry":
raise TypeError(
f"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}"
)
def export(self, *args, **kwargs) -> Dict[str, Any]:
return {}
class BaseImplicitGeometry(BaseGeometry):
@dataclass
class Config(BaseGeometry.Config):
radius: float = 1.0
isosurface: bool = True
isosurface_method: str = "mt"
isosurface_resolution: int = 128
isosurface_threshold: Union[float, str] = 0.0
isosurface_chunk: int = 0
isosurface_coarse_to_fine: bool = True
isosurface_deformable_grid: bool = False
isosurface_remove_outliers: bool = True
isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01
cfg: Config
def configure(self) -> None:
self.bbox: Float[Tensor, "2 3"]
self.register_buffer(
"bbox",
torch.as_tensor(
[
[-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],
[self.cfg.radius, self.cfg.radius, self.cfg.radius],
],
dtype=torch.float32,
),
)
self.isosurface_helper: Optional[IsosurfaceHelper] = None
self.unbounded: bool = True
def _initilize_isosurface_helper(self):
if self.cfg.isosurface and self.isosurface_helper is None:
if self.cfg.isosurface_method == "mc-cpu":
self.isosurface_helper = MarchingCubeCPUHelper(
self.cfg.isosurface_resolution
).to(self.device)
elif self.cfg.isosurface_method == "mt": | self.isosurface_helper = MarchingTetrahedraHelper( | 2 | 2023-10-24 19:02:44+00:00 | 12k |
princeton-nlp/LLM-Shearing | llmshearing/models/composer_pythia.py | [
{
"identifier": "L0Module",
"path": "llmshearing/models/l0_module.py",
"snippet": "class L0Module(nn.Module):\n def __init__(self, cfg, device):\n super(L0Module, self).__init__()\n\n # base and target model info\n n_matrix_mlp = 2 if \"pythia\" in cfg.name else 3\n self.b... | import math
import torch
import torch.nn as nn
from typing import List, Optional, Tuple
from einops import rearrange
from omegaconf import DictConfig
from torch.nn import functional as F
from transformers.pytorch_utils import (find_pruneable_heads_and_indices,
prune_linear_layer)
from llmshearing.models.l0_module import L0Module
from llmshearing.models.composer_llama import ComposerMosaicLlama, prepare_decoder_attention_mask, turn_head_z, turn_mlp_z, normal_attn_fn, flash_attn_fn
from transformers.models.gpt_neox.modeling_gpt_neox import apply_rotary_pos_emb | 8,487 |
class ComposerMosaicPythia(ComposerMosaicLlama):
def __init__(self, cfg):
super().__init__(cfg)
self.model = PythiaModel(cfg)
class CoFiLayerNorm(torch.nn.LayerNorm):
    """LayerNorm that can normalize only the hidden dimensions kept by a CoFi mask.

    When ``hidden_z`` is given, dimensions whose mask entry is zero are excluded
    from the normalization statistics; their positions in the output keep the
    (unnormalized) input values. ``prune_params`` makes the pruning permanent.
    """

    def __init__(self, normalized_shape, eps: float = 1e-5, elementwise_affine: bool = True, device=None) -> None:
        # Pass ``device`` by keyword: torch >= 2.1 inserted a ``bias`` parameter
        # before ``device`` in nn.LayerNorm.__init__, so passing device as the
        # 4th positional argument would bind it to ``bias`` and (for the default
        # device=None) silently disable the bias term, breaking the masked
        # forward path below which indexes ``self.bias``.
        super().__init__(normalized_shape, eps, elementwise_affine, device=device)

    def forward(self, input, hidden_z=None):
        """Apply layer norm; if ``hidden_z`` is given, only over its non-zero dims.

        Assumes ``elementwise_affine=True`` (the masked path indexes both
        ``self.weight`` and ``self.bias``).
        """
        if hidden_z is not None:
            remaining_index = torch.where(~hidden_z.eq(0))[0]
            # Normalize only the surviving hidden dimensions ...
            compressed_input = torch.index_select(
                input, dim=-1, index=remaining_index)
            compressed_weight = self.weight[remaining_index]
            compressed_bias = self.bias[remaining_index]
            normalized_shape = len(remaining_index)
            normed_input = F.layer_norm(
                compressed_input, [normalized_shape], compressed_weight, compressed_bias, self.eps)
            # ... then scatter them back; pruned positions keep their raw input
            # values (they are expected to be masked out downstream).
            output = input.clone()
            normed_input = normed_input.to(output.dtype)
            output[..., remaining_index] = normed_input
        else:
            output = F.layer_norm(
                input, self.normalized_shape, self.weight, self.bias, self.eps)
        return output

    def prune_params(self, hidden_z):
        """Permanently drop the hidden dimensions whose mask entry is zero."""
        remaining_index = torch.where(~hidden_z.eq(0))[0]
        # self.weight = torch.nn.Parameter(self.weight.data.mul(hidden_z.squeeze())[remaining_index])
        self.weight = torch.nn.parameter.Parameter(self.weight.index_select(0, remaining_index))
        self.bias = torch.nn.parameter.Parameter(self.bias.index_select(0, remaining_index))
        self.normalized_shape = (len(remaining_index),)
class PythiaEmbedding(nn.Embedding):
    """Embedding layer whose hidden dimensions can be masked or pruned away."""

    def forward(self, input, hidden_z=None):
        """Look up embeddings; optionally scale each hidden dim by ``hidden_z``."""
        out = super().forward(input)
        return out if hidden_z is None else out.mul(hidden_z)

    def prune_params(self, hidden_z):
        """Permanently remove the hidden dimensions whose mask entry is zero."""
        keep = torch.where(~hidden_z.eq(0))[0]
        # Fold the (possibly non-binary) mask into the weights before slicing,
        # then keep only the surviving columns as a fresh Parameter.
        self.weight.data = self.weight.data.mul(hidden_z)
        self.weight = torch.nn.parameter.Parameter(self.weight.index_select(1, keep).clone())
        self.embedding_dim = len(keep)
        print(f" Embedding: {len(hidden_z)} -> {len(keep)}")
class PythiaModel(nn.Module):
def __init__(self, cfg: DictConfig):
super().__init__()
print(f'Tried to build Pythia model with cfg.name={cfg.name}')
self.cfg = cfg
### added ###
self.l0_module = None
if getattr(self.cfg, "l0_module", None) is not None:
|
class ComposerMosaicPythia(ComposerMosaicLlama):
    """Composer wrapper for Pythia models; reuses the Llama wrapper machinery
    but swaps the underlying network for a :class:`PythiaModel`."""

    def __init__(self, cfg):
        super().__init__(cfg)
        # Replace whatever model the Llama base __init__ built with the Pythia
        # variant. NOTE(review): this presumably constructs two models back to
        # back and discards the first — confirm the double construction is
        # intended (it costs init time/memory but is otherwise harmless).
        self.model = PythiaModel(cfg)
class CoFiLayerNorm(torch.nn.LayerNorm):
    """LayerNorm that can normalize only the hidden dimensions kept by a CoFi mask.

    When ``hidden_z`` is given, dimensions whose mask entry is zero are excluded
    from the normalization statistics; their positions in the output keep the
    (unnormalized) input values. ``prune_params`` makes the pruning permanent.
    """

    def __init__(self, normalized_shape, eps: float = 1e-5, elementwise_affine: bool = True, device=None) -> None:
        # Pass ``device`` by keyword: torch >= 2.1 inserted a ``bias`` parameter
        # before ``device`` in nn.LayerNorm.__init__, so passing device as the
        # 4th positional argument would bind it to ``bias`` and (for the default
        # device=None) silently disable the bias term, breaking the masked
        # forward path below which indexes ``self.bias``.
        super().__init__(normalized_shape, eps, elementwise_affine, device=device)

    def forward(self, input, hidden_z=None):
        """Apply layer norm; if ``hidden_z`` is given, only over its non-zero dims.

        Assumes ``elementwise_affine=True`` (the masked path indexes both
        ``self.weight`` and ``self.bias``).
        """
        if hidden_z is not None:
            remaining_index = torch.where(~hidden_z.eq(0))[0]
            # Normalize only the surviving hidden dimensions ...
            compressed_input = torch.index_select(
                input, dim=-1, index=remaining_index)
            compressed_weight = self.weight[remaining_index]
            compressed_bias = self.bias[remaining_index]
            normalized_shape = len(remaining_index)
            normed_input = F.layer_norm(
                compressed_input, [normalized_shape], compressed_weight, compressed_bias, self.eps)
            # ... then scatter them back; pruned positions keep their raw input
            # values (they are expected to be masked out downstream).
            output = input.clone()
            normed_input = normed_input.to(output.dtype)
            output[..., remaining_index] = normed_input
        else:
            output = F.layer_norm(
                input, self.normalized_shape, self.weight, self.bias, self.eps)
        return output

    def prune_params(self, hidden_z):
        """Permanently drop the hidden dimensions whose mask entry is zero."""
        remaining_index = torch.where(~hidden_z.eq(0))[0]
        # self.weight = torch.nn.Parameter(self.weight.data.mul(hidden_z.squeeze())[remaining_index])
        self.weight = torch.nn.parameter.Parameter(self.weight.index_select(0, remaining_index))
        self.bias = torch.nn.parameter.Parameter(self.bias.index_select(0, remaining_index))
        self.normalized_shape = (len(remaining_index),)
class PythiaEmbedding(nn.Embedding):
    """Embedding layer whose hidden dimensions can be masked or pruned away."""

    def forward(self, input, hidden_z=None):
        """Look up embeddings; optionally scale each hidden dim by ``hidden_z``."""
        out = super().forward(input)
        return out if hidden_z is None else out.mul(hidden_z)

    def prune_params(self, hidden_z):
        """Permanently remove the hidden dimensions whose mask entry is zero."""
        keep = torch.where(~hidden_z.eq(0))[0]
        # Fold the (possibly non-binary) mask into the weights before slicing,
        # then keep only the surviving columns as a fresh Parameter.
        self.weight.data = self.weight.data.mul(hidden_z)
        self.weight = torch.nn.parameter.Parameter(self.weight.index_select(1, keep).clone())
        self.embedding_dim = len(keep)
        print(f" Embedding: {len(hidden_z)} -> {len(keep)}")
class PythiaModel(nn.Module):
def __init__(self, cfg: DictConfig):
super().__init__()
print(f'Tried to build Pythia model with cfg.name={cfg.name}')
self.cfg = cfg
### added ###
self.l0_module = None
if getattr(self.cfg, "l0_module", None) is not None: | self.l0_module = L0Module(self.cfg, device=cfg.init_device) | 0 | 2023-10-16 12:26:08+00:00 | 12k |
hkchengrex/Cutie | process_video.py | [
{
"identifier": "CUTIE",
"path": "cutie/model/cutie.py",
"snippet": "class CUTIE(nn.Module):\n def __init__(self, cfg: DictConfig, *, single_object=False):\n super().__init__()\n model_cfg = cfg.model\n self.ms_dims = model_cfg.pixel_encoder.ms_dims\n self.key_dim = model_... | from os import path, listdir
from omegaconf import DictConfig, open_dict
from hydra import compose, initialize
from cutie.model.cutie import CUTIE
from cutie.inference.inference_core import InferenceCore
from cutie.inference.utils.results_utils import ResultSaver
from tqdm import tqdm
from time import perf_counter
from gui.interactive_utils import image_to_torch, index_numpy_to_one_hot_torch
from PIL import Image
from argparse import ArgumentParser
import torch
import cv2
import numpy as np | 7,943 |
def process_video(cfg: DictConfig):
# general setup
torch.set_grad_enabled(False)
if cfg['device'] == 'cuda' and torch.cuda.is_available():
device = 'cuda'
elif cfg['device'] == 'mps' and torch.backends.mps.is_available():
device = 'mps'
else:
device = 'cpu'
print(f'Using device: {device}')
use_amp = cfg.amp
# Load the network weights
print(f'Loading Cutie and weights')
cutie = CUTIE(cfg).to(device).eval()
if cfg.weights is not None:
model_weights = torch.load(cfg.weights, map_location=device)
cutie.load_weights(model_weights)
else:
print('No model weights loaded. Are you sure about this?')
# Open video
video = cfg['video']
if video is None:
print('No video defined. Please specify!')
exit()
video_name = path.splitext(video)[0]
print(f'Opening video {video}')
cap = cv2.VideoCapture(video)
if not cap.isOpened():
print(f'Unable to open video {video}!')
exit()
total_frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# Initial mask handling
mask_dir = cfg['mask_dir']
if mask_dir is None:
print('No mask_dir defined. Please specify!')
exit()
# determine if the mask uses 3-channel long ID or 1-channel (0~255) short ID
all_mask_frames = sorted(listdir(mask_dir))
first_mask_frame = all_mask_frames[0]
first_mask = Image.open(path.join(mask_dir, first_mask_frame))
if first_mask.mode == 'P':
use_long_id = False
palette = first_mask.getpalette()
elif first_mask.mode == 'RGB':
use_long_id = True
palette = None
elif first_mask.mode == 'L':
use_long_id = False
palette = None
else:
print(f'Unknown mode {first_mask.mode} in {first_mask_frame}.')
exit()
num_objects = cfg['num_objects']
if num_objects is None or num_objects < 1:
num_objects = len(np.unique(first_mask)) - 1
|
def process_video(cfg: DictConfig):
# general setup
torch.set_grad_enabled(False)
if cfg['device'] == 'cuda' and torch.cuda.is_available():
device = 'cuda'
elif cfg['device'] == 'mps' and torch.backends.mps.is_available():
device = 'mps'
else:
device = 'cpu'
print(f'Using device: {device}')
use_amp = cfg.amp
# Load the network weights
print(f'Loading Cutie and weights')
cutie = CUTIE(cfg).to(device).eval()
if cfg.weights is not None:
model_weights = torch.load(cfg.weights, map_location=device)
cutie.load_weights(model_weights)
else:
print('No model weights loaded. Are you sure about this?')
# Open video
video = cfg['video']
if video is None:
print('No video defined. Please specify!')
exit()
video_name = path.splitext(video)[0]
print(f'Opening video {video}')
cap = cv2.VideoCapture(video)
if not cap.isOpened():
print(f'Unable to open video {video}!')
exit()
total_frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
# Initial mask handling
mask_dir = cfg['mask_dir']
if mask_dir is None:
print('No mask_dir defined. Please specify!')
exit()
# determine if the mask uses 3-channel long ID or 1-channel (0~255) short ID
all_mask_frames = sorted(listdir(mask_dir))
first_mask_frame = all_mask_frames[0]
first_mask = Image.open(path.join(mask_dir, first_mask_frame))
if first_mask.mode == 'P':
use_long_id = False
palette = first_mask.getpalette()
elif first_mask.mode == 'RGB':
use_long_id = True
palette = None
elif first_mask.mode == 'L':
use_long_id = False
palette = None
else:
print(f'Unknown mode {first_mask.mode} in {first_mask_frame}.')
exit()
num_objects = cfg['num_objects']
if num_objects is None or num_objects < 1:
num_objects = len(np.unique(first_mask)) - 1
| processor = InferenceCore(cutie, cfg=cfg) | 1 | 2023-10-19 17:49:24+00:00 | 12k |
MolecularAI/REINVENT4 | tests/chemistry/library_design/test_fragment_reactions_slice_enumerator.py | [
{
"identifier": "Conversions",
"path": "reinvent/chemistry/conversions.py",
"snippet": "class Conversions:\n @staticmethod\n def smiles_to_mols_and_indices(query_smiles: List[str]) -> Tuple[List[Mol], List[int]]:\n mols = [MolFromSmiles(smile) for smile in query_smiles]\n valid_mask ... | import unittest
from reinvent.chemistry import Conversions
from reinvent.chemistry.library_design import (
FragmentReactionSliceEnumerator,
BondMaker,
AttachmentPoints,
)
from reinvent.chemistry.library_design.dtos import FilteringConditionDTO
from reinvent.chemistry.library_design.enums import MolecularDescriptorsEnum
from reinvent.chemistry.library_design.fragment_reactions import FragmentReactions
from tests.chemistry.library_design.fixtures import FRAGMENT_REACTION_SUZUKI, FRAGMENT_REACTIONS
from tests.chemistry.fixtures.test_data import CELECOXIB | 9,913 |
class TestSingleFragmentReactionsSliceEnumerator(unittest.TestCase):
def setUp(self):
self.chemistry = Conversions()
self.reactions = FragmentReactions()
self._bond_maker = BondMaker()
self._attachment_points = AttachmentPoints()
self._suzuki_reaction_dto_list = self.reactions.create_reactions_from_smirks(
|
class TestSingleFragmentReactionsSliceEnumerator(unittest.TestCase):
def setUp(self):
self.chemistry = Conversions()
self.reactions = FragmentReactions()
self._bond_maker = BondMaker()
self._attachment_points = AttachmentPoints()
self._suzuki_reaction_dto_list = self.reactions.create_reactions_from_smirks( | FRAGMENT_REACTION_SUZUKI | 7 | 2023-10-20 06:43:16+00:00 | 12k |
jhejna/cpl | research/algs/off_policy_algorithm.py | [
{
"identifier": "ReplayBuffer",
"path": "research/datasets/replay_buffer/buffer.py",
"snippet": "class ReplayBuffer(torch.utils.data.IterableDataset):\n \"\"\"\n Generic Replay Buffer Class.\n\n This class adheres to the following conventions to support multiprocessing:\n 1. Variables/functi... | import datetime
import functools
import os
import sys
import tempfile
import gym
import numpy as np
import torch
from abc import abstractmethod
from typing import Any, Dict, Optional, Union
from research.datasets import ReplayBuffer
from research.datasets.replay_buffer import storage
from research.envs.base import EmptyEnv
from research.networks.base import ModuleContainer
from research.utils import runners, utils
from .base import Algorithm
from research.utils.config import Config | 9,758 |
class OffPolicyAlgorithm(Algorithm):
def __init__(
self,
*args,
offline_steps: int = 0, # Run fully offline by setting to -1
random_steps: int = 1000,
async_runner_ep_lag: int = 1,
**kwargs,
):
super().__init__(*args, **kwargs)
self.offline_steps = offline_steps
self.random_steps = random_steps
self.async_runner_ep_lag = async_runner_ep_lag
def setup_datasets(self, env: gym.Env, total_steps: int):
super().setup_datasets(env, total_steps)
# Assign the correct update function based on what is passed in.
|
class OffPolicyAlgorithm(Algorithm):
def __init__(
self,
*args,
offline_steps: int = 0, # Run fully offline by setting to -1
random_steps: int = 1000,
async_runner_ep_lag: int = 1,
**kwargs,
):
super().__init__(*args, **kwargs)
self.offline_steps = offline_steps
self.random_steps = random_steps
self.async_runner_ep_lag = async_runner_ep_lag
def setup_datasets(self, env: gym.Env, total_steps: int):
super().setup_datasets(env, total_steps)
# Assign the correct update function based on what is passed in. | if env is None or isinstance(env, EmptyEnv) or self.offline_steps < 0: | 2 | 2023-10-19 17:25:45+00:00 | 12k |
nbasyl/LLM-FP4 | configs/FPQ_baseline_config_llama.py | [
{
"identifier": "FPPTQSLBatchingQuantLinear_fpq_baseline",
"path": "quant_layers/fp_linear.py",
"snippet": "class FPPTQSLBatchingQuantLinear_fpq_baseline(FPPTQSLQuantLinear):\n def __init__(self, \n in_features: int,\n out_features: int,\n bias: bool = True,\n mode = \"raw... | from quant_layers.fp_linear import FPPTQSLBatchingQuantLinear_fpq_baseline
from quant_layers.fp_embed import FPPTQSLQuantEmbedding_fpq_baseline | 9,607 |
# Default quantization bit-widths: 8-bit values with a 4-bit exponent field
# (mantissa width is presumably bit - exp_bit - sign — confirm in quant layers).
bit = 8
exp_bit = 4

# Names of the quantized modules, grouped by kind.
embed_name_list = ["qembedding"]
fc_name_list = [
    "qlinear_query",
    "qlinear_key",
    "qlinear_value",
    "qlinear_o",
    "qlinear_gate",
    "qlinear_down",
    "qlinear_up",
    "qlinear_score",
]
matmul_name_list = ["qmatmul_qk", "qmatmul_scorev"]

# Per-module total bit-widths: weights / activations / embeddings / matmul operands.
w_bit = dict.fromkeys(fc_name_list, bit)
a_bit = dict.fromkeys(fc_name_list, bit)
embed_bit = dict.fromkeys(embed_name_list, bit)
A_bit = dict.fromkeys(matmul_name_list, bit)
B_bit = dict.fromkeys(matmul_name_list, bit)

# Per-module exponent bit-widths, mirroring the groups above.
w_exp_bit = dict.fromkeys(fc_name_list, exp_bit)
a_exp_bit = dict.fromkeys(fc_name_list, exp_bit)
embed_exp_bit = dict.fromkeys(embed_name_list, exp_bit)
A_exp_bit = dict.fromkeys(matmul_name_list, exp_bit)
B_exp_bit = dict.fromkeys(matmul_name_list, exp_bit)

# Calibration/search hyperparameters for the quantized embedding layers.
ptqsl_embedding_kwargs = {
    "metric": "L2_norm",
    "eq_alpha": 0.01,
    "eq_beta": 1.2,
    "eq_n": 100,
    'search_round': 3,
    "n_V": 1,
    "n_H": 1
}
# Calibration/search hyperparameters for the quantized linear layers.
ptqsl_linear_kwargs = {
    "metric": "L2_norm",
    "eq_alpha": 0.01,
    "eq_beta": 1.2,
    "eq_n": 100,
    'search_round': 3,
    "n_V": 1,
    "n_H": 1,
    "n_a": 1,
    # Kept as a flag only; no explicit bias correction is applied in linear.
    "bias_correction": True
}
def get_module(module_type, *args, **kwargs):
if "embedding" in module_type:
kwargs.update(ptqsl_embedding_kwargs)
|
# Default quantization bit-widths: 8-bit values with a 4-bit exponent field
# (mantissa width is presumably bit - exp_bit - sign — confirm in quant layers).
bit = 8
exp_bit = 4

# Names of the quantized modules, grouped by kind.
embed_name_list = ["qembedding"]
fc_name_list = [
    "qlinear_query",
    "qlinear_key",
    "qlinear_value",
    "qlinear_o",
    "qlinear_gate",
    "qlinear_down",
    "qlinear_up",
    "qlinear_score",
]
matmul_name_list = ["qmatmul_qk", "qmatmul_scorev"]

# Per-module total bit-widths: weights / activations / embeddings / matmul operands.
w_bit = dict.fromkeys(fc_name_list, bit)
a_bit = dict.fromkeys(fc_name_list, bit)
embed_bit = dict.fromkeys(embed_name_list, bit)
A_bit = dict.fromkeys(matmul_name_list, bit)
B_bit = dict.fromkeys(matmul_name_list, bit)

# Per-module exponent bit-widths, mirroring the groups above.
w_exp_bit = dict.fromkeys(fc_name_list, exp_bit)
a_exp_bit = dict.fromkeys(fc_name_list, exp_bit)
embed_exp_bit = dict.fromkeys(embed_name_list, exp_bit)
A_exp_bit = dict.fromkeys(matmul_name_list, exp_bit)
B_exp_bit = dict.fromkeys(matmul_name_list, exp_bit)

# Calibration/search hyperparameters for the quantized embedding layers.
ptqsl_embedding_kwargs = {
    "metric": "L2_norm",
    "eq_alpha": 0.01,
    "eq_beta": 1.2,
    "eq_n": 100,
    'search_round': 3,
    "n_V": 1,
    "n_H": 1
}
# Calibration/search hyperparameters for the quantized linear layers.
ptqsl_linear_kwargs = {
    "metric": "L2_norm",
    "eq_alpha": 0.01,
    "eq_beta": 1.2,
    "eq_n": 100,
    'search_round': 3,
    "n_V": 1,
    "n_H": 1,
    "n_a": 1,
    # Kept as a flag only; no explicit bias correction is applied in linear.
    "bias_correction": True
}
def get_module(module_type, *args, **kwargs):
if "embedding" in module_type:
kwargs.update(ptqsl_embedding_kwargs) | module= FPPTQSLQuantEmbedding_fpq_baseline(*args,**kwargs,bit= embed_bit[module_type], exponent_bit=embed_exp_bit[module_type], padding_idx=0) | 1 | 2023-10-15 06:05:13+00:00 | 12k |
bcmi/libcom | libcom/shadow_generation/source/PostProcessModel.py | [
{
"identifier": "ControlLDM",
"path": "libcom/shadow_generation/source/cldm/cldm.py",
"snippet": "class ControlLDM(LatentDiffusion):\n\n def __init__(self, control_stage_config, control_key, only_mid_control, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.control_model = i... | from torch import nn
from .cldm.cldm import ControlLDM
from .cldm.model import create_model, load_state_dict
from torch.utils.data import DataLoader
from .cldm.logger import PostProcessLogger
from PIL import Image
from libcom.shadow_generation.source.ldm.modules.diffusionmodules.openaimodel import (ResBlock, TimestepEmbedSequential, AttentionBlock,
Upsample, SpatialTransformer, Downsample)
from libcom.shadow_generation.source.ldm.modules.diffusionmodules.util import (
checkpoint,
conv_nd,
linear,
avg_pool_nd,
zero_module,
normalization,
timestep_embedding,
)
from libcom.shadow_generation.source.ldm.util import exists
import torch
import pytorch_lightning as pl
import os
import cv2
import numpy as np | 7,271 |
if num_head_channels == -1:
dim_head = ch // num_heads
else:
num_heads = ch // num_head_channels
dim_head = num_head_channels
if legacy:
#num_heads = 1
dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
self.middle_block = TimestepEmbedSequential(
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=dim_head,
use_new_attention_order=use_new_attention_order,
) if not use_spatial_transformer else SpatialTransformer( # always uses a self-attn
ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer,
use_checkpoint=use_checkpoint
),
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
)
self._feature_size += ch
self.output_blocks1 = nn.ModuleList([])
self.output_blocks2 = nn.ModuleList([])
for level, mult in list(enumerate(channel_mult))[::-1]:
for i in range(self.num_res_blocks[level] + 1):
ich = input_block_chans.pop()
layers1 = [
ResBlock(
ch + ich,
time_embed_dim,
dropout,
out_channels=model_channels * mult,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
)
]
layers2 = [
ResBlock(
ch + ich,
time_embed_dim,
dropout,
out_channels=model_channels * mult,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
)
]
ch = model_channels * mult
if ds in attention_resolutions:
if num_head_channels == -1:
dim_head = ch // num_heads
else:
num_heads = ch // num_head_channels
dim_head = num_head_channels
if legacy:
#num_heads = 1
dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
if exists(disable_self_attentions):
disabled_sa = disable_self_attentions[level]
else:
disabled_sa = False
if not exists(num_attention_blocks) or i < num_attention_blocks[level]:
layers1.append(
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads_upsample,
num_head_channels=dim_head,
use_new_attention_order=use_new_attention_order,
) if not use_spatial_transformer else SpatialTransformer(
ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
use_checkpoint=use_checkpoint
)
)
layers2.append(
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads_upsample,
num_head_channels=dim_head,
use_new_attention_order=use_new_attention_order,
) if not use_spatial_transformer else SpatialTransformer(
ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
use_checkpoint=use_checkpoint
)
)
if level and i == self.num_res_blocks[level]:
out_ch = ch
layers1.append(ResBlock(ch,
time_embed_dim,
dropout,
out_channels=out_ch,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
up=True,
) if resblock_updown else
| # from share import *
class Post_Process_Net(nn.Module):
def __init__(
self,
image_size,
in_channels,
model_channels,
out_channels,
num_res_blocks,
attention_resolutions,
dropout=0,
channel_mult=(1, 2, 4, 8),
conv_resample=True,
dims=2,
num_classes=None,
use_checkpoint=False,
use_fp16=False,
num_heads=-1,
num_head_channels=-1,
num_heads_upsample=-1,
use_scale_shift_norm=False,
resblock_updown=False,
use_new_attention_order=False,
use_spatial_transformer=False, # custom transformer support
transformer_depth=1, # custom transformer support
context_dim=None, # custom transformer support
n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model
legacy=True,
disable_self_attentions=None,
num_attention_blocks=None,
disable_middle_self_attn=False,
use_linear_in_transformer=False,
):
super().__init__()
self.image_size = image_size
self.in_channels = in_channels
self.model_channels = model_channels
self.out_channels = out_channels
if isinstance(num_res_blocks, int):
self.num_res_blocks = len(channel_mult) * [num_res_blocks]
else:
if len(num_res_blocks) != len(channel_mult):
raise ValueError("provide num_res_blocks either as an int (globally constant) or "
"as a list/tuple (per-level) with the same length as channel_mult")
self.num_res_blocks = num_res_blocks
if disable_self_attentions is not None:
# should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not
assert len(disable_self_attentions) == len(channel_mult)
if num_attention_blocks is not None:
assert len(num_attention_blocks) == len(self.num_res_blocks)
assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks))))
print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. "
f"This option has LESS priority than attention_resolutions {attention_resolutions}, "
f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, "
f"attention will still not be set.")
self.attention_resolutions = attention_resolutions
self.dropout = dropout
self.channel_mult = channel_mult
self.conv_resample = conv_resample
self.num_classes = num_classes
self.use_checkpoint = use_checkpoint
self.dtype = torch.float16 if use_fp16 else torch.float32
self.num_heads = num_heads
self.num_head_channels = num_head_channels
self.num_heads_upsample = num_heads_upsample
self.predict_codebook_ids = n_embed is not None
time_embed_dim = model_channels * 4
self.time_embed = nn.Sequential(
linear(model_channels, time_embed_dim),
nn.SiLU(),
linear(time_embed_dim, time_embed_dim),
)
if self.num_classes is not None:
if isinstance(self.num_classes, int):
self.label_emb = nn.Embedding(num_classes, time_embed_dim)
elif self.num_classes == "continuous":
print("setting up linear c_adm embedding layer")
self.label_emb = nn.Linear(1, time_embed_dim)
else:
raise ValueError()
self.input_blocks = nn.ModuleList(
[
TimestepEmbedSequential(
conv_nd(dims, in_channels, model_channels, 3, padding=1)
)
]
)
self._feature_size = model_channels
input_block_chans = [model_channels]
ch = model_channels
ds = 1
for level, mult in enumerate(channel_mult):
for nr in range(self.num_res_blocks[level]):
layers = [
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=mult * model_channels,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
)
]
ch = mult * model_channels
if ds in attention_resolutions:
if num_head_channels == -1:
dim_head = ch // num_heads
else:
num_heads = ch // num_head_channels
dim_head = num_head_channels
if legacy:
#num_heads = 1
dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
if exists(disable_self_attentions):
disabled_sa = disable_self_attentions[level]
else:
disabled_sa = False
if not exists(num_attention_blocks) or nr < num_attention_blocks[level]:
layers.append(
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=dim_head,
use_new_attention_order=use_new_attention_order,
) if not use_spatial_transformer else SpatialTransformer(
ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
use_checkpoint=use_checkpoint
)
)
self.input_blocks.append(TimestepEmbedSequential(*layers))
self._feature_size += ch
input_block_chans.append(ch)
if level != len(channel_mult) - 1:
out_ch = ch
if resblock_updown:
stage_last_block = ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=out_ch,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
down=True,
)
else:
stage_last_block = Downsample(
ch, conv_resample, dims=dims, out_channels=out_ch
)
self.input_blocks.append(
TimestepEmbedSequential(stage_last_block)
)
ch = out_ch
input_block_chans.append(ch)
ds *= 2
self._feature_size += ch
if num_head_channels == -1:
dim_head = ch // num_heads
else:
num_heads = ch // num_head_channels
dim_head = num_head_channels
if legacy:
#num_heads = 1
dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
self.middle_block = TimestepEmbedSequential(
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=dim_head,
use_new_attention_order=use_new_attention_order,
) if not use_spatial_transformer else SpatialTransformer( # always uses a self-attn
ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer,
use_checkpoint=use_checkpoint
),
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
)
self._feature_size += ch
self.output_blocks1 = nn.ModuleList([])
self.output_blocks2 = nn.ModuleList([])
for level, mult in list(enumerate(channel_mult))[::-1]:
for i in range(self.num_res_blocks[level] + 1):
ich = input_block_chans.pop()
layers1 = [
ResBlock(
ch + ich,
time_embed_dim,
dropout,
out_channels=model_channels * mult,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
)
]
layers2 = [
ResBlock(
ch + ich,
time_embed_dim,
dropout,
out_channels=model_channels * mult,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
)
]
ch = model_channels * mult
if ds in attention_resolutions:
if num_head_channels == -1:
dim_head = ch // num_heads
else:
num_heads = ch // num_head_channels
dim_head = num_head_channels
if legacy:
#num_heads = 1
dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
if exists(disable_self_attentions):
disabled_sa = disable_self_attentions[level]
else:
disabled_sa = False
if not exists(num_attention_blocks) or i < num_attention_blocks[level]:
layers1.append(
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads_upsample,
num_head_channels=dim_head,
use_new_attention_order=use_new_attention_order,
) if not use_spatial_transformer else SpatialTransformer(
ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
use_checkpoint=use_checkpoint
)
)
layers2.append(
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads_upsample,
num_head_channels=dim_head,
use_new_attention_order=use_new_attention_order,
) if not use_spatial_transformer else SpatialTransformer(
ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
use_checkpoint=use_checkpoint
)
)
if level and i == self.num_res_blocks[level]:
out_ch = ch
layers1.append(ResBlock(ch,
time_embed_dim,
dropout,
out_channels=out_ch,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
up=True,
) if resblock_updown else | Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)) | 4 | 2023-10-19 05:08:12+00:00 | 12k |
e4s2023/E4S2023 | swap_face_fine/defliker/src/stage1_neural_atlas.py | [
{
"identifier": "IMLP",
"path": "swap_face_fine/defliker/src/models/stage_1/implicit_neural_networks.py",
"snippet": "class IMLP(nn.Module):\n def __init__(\n self,\n input_dim,\n output_dim,\n hidden_dim=256,\n use_positional=True,\n ... | import sys
import torch
import torch.optim as optim
import numpy as np
import argparse
import cv2
import glob
import json
import os
import subprocess
from tqdm import tqdm
from swap_face_fine.defliker.src.models.stage_1.implicit_neural_networks import IMLP
from swap_face_fine.defliker.src.models.stage_1.evaluate import evaluate_model_single
from swap_face_fine.defliker.src.models.stage_1.loss_utils import get_gradient_loss_single, get_rigidity_loss, get_optical_flow_loss
from swap_face_fine.defliker.src.models.stage_1.unwrap_utils import get_tuples, pre_train_mapping, load_input_data_single, save_mask_flow
from pathlib import Path
from datetime import datetime
from torch.utils.tensorboard import SummaryWriter | 8,012 |
# set gpu
# NOTE: assigning CUDA_VISIBLE_DEVICES only takes effect if it happens before
# the CUDA context is initialized; since this runs at import time, importing
# this module has the side effect of pinning the process to GPU "0".
select_gpu = "0" # default use 0
os.environ["CUDA_VISIBLE_DEVICES"] = select_gpu
# Module-wide device handle; falls back to CPU when CUDA is unavailable.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def main(config, args):
maximum_number_of_frames = config["maximum_number_of_frames"]
# read the first frame of vid path and get its resolution
frames_list = sorted(glob.glob(os.path.join(args.vid_path, "*g")))
frame_temp = cv2.imread(frames_list[0])
resx = frame_temp.shape[1]
resy = frame_temp.shape[0]
if args.down is not None:
resx = int(resx / args.down)
resy = int(resy / args.down)
iters_num = config["iters_num"]
#batch size:
samples = config["samples_batch"]
# evaluation frequency (in terms of iterations number)
evaluate_every = np.int64(config["evaluate_every"])
# optionally it is possible to load a checkpoint
load_checkpoint = config["load_checkpoint"] # set to true to continue from a checkpoint
checkpoint_path = config["checkpoint_path"]
# a data folder that contains folders named "[video_name]","[video_name]_flow","[video_name]_maskrcnn" (optionally)
data_folder = Path(args.vid_path)
# results_folder_name = config["results_folder_name"] # the folder (under the code's folder where the experiments will be saved.
results_folder_name = "results"
# add_to_experiment_folder_name = config["add_to_experiment_folder_name"] # for each experiment folder (saved inside "results_folder_name") add this string
# boolean variables for determining if a pretraining is used:
pretrain_mapping1 = config["pretrain_mapping1"]
pretrain_iter_number = config["pretrain_iter_number"]
# the scale of the atlas uv coordinates relative to frame's xy coordinates
uv_mapping_scale = config["uv_mapping_scale"]
# M_f's hyper parameters
use_positional_encoding_mapping1 = config["use_positional_encoding_mapping1"]
number_of_positional_encoding_mapping1 = config["number_of_positional_encoding_mapping1"]
number_of_layers_mapping1 = config["number_of_layers_mapping1"]
number_of_channels_mapping1 = config["number_of_channels_mapping1"]
# Atlas MLP's hyper parameters
number_of_channels_atlas = config["number_of_channels_atlas"]
number_of_layers_atlas = config["number_of_layers_atlas"]
positional_encoding_num_atlas = config[
"positional_encoding_num_atlas"]
# coefficients for the different loss terms
rgb_coeff = config["rgb_coeff"] # coefficient for rgb loss term:
# optical flow loss term coefficient (beta_f in the paper):
optical_flow_coeff = config["optical_flow_coeff"]
use_gradient_loss = config["use_gradient_loss"]
gradient_loss_coeff = config["gradient_loss_coeff"]
rigidity_coeff = config["rigidity_coeff"] # coefficient for the rigidity loss term
derivative_amount = config["derivative_amount"] # For finite differences gradient computation:
# for using global (in addition to the current local) rigidity loss:
include_global_rigidity_loss = config["include_global_rigidity_loss"]
# Finite differences parameters for the global rigidity terms:
global_rigidity_derivative_amount_fg = config["global_rigidity_derivative_amount_fg"]
global_rigidity_coeff_fg = config["global_rigidity_coeff_fg"]
stop_global_rigidity = config["stop_global_rigidity"]
use_optical_flow = True
vid_name = data_folder.name
vid_root = data_folder.parent
results_folder = Path(
f'./{results_folder_name}/{vid_name}/stage_1')
results_folder.mkdir(parents=True, exist_ok=True)
with open('%s/config.json' % results_folder, 'w') as json_file:
json.dump(config, json_file, indent=4)
writer = SummaryWriter(log_dir=str(results_folder))
optical_flows_mask, video_frames, optical_flows_reverse_mask, mask_frames, video_frames_dx, video_frames_dy, optical_flows_reverse, optical_flows = load_input_data_single(
resy, resx, maximum_number_of_frames, data_folder, True, True, vid_root, vid_name)
number_of_frames=video_frames.shape[3]
# save a video showing the masked part of the forward optical flow:s
|
# set gpu
# NOTE: assigning CUDA_VISIBLE_DEVICES only takes effect if it happens before
# the CUDA context is initialized; since this runs at import time, importing
# this module has the side effect of pinning the process to GPU "0".
select_gpu = "0" # default use 0
os.environ["CUDA_VISIBLE_DEVICES"] = select_gpu
# Module-wide device handle; falls back to CPU when CUDA is unavailable.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def main(config, args):
maximum_number_of_frames = config["maximum_number_of_frames"]
# read the first frame of vid path and get its resolution
frames_list = sorted(glob.glob(os.path.join(args.vid_path, "*g")))
frame_temp = cv2.imread(frames_list[0])
resx = frame_temp.shape[1]
resy = frame_temp.shape[0]
if args.down is not None:
resx = int(resx / args.down)
resy = int(resy / args.down)
iters_num = config["iters_num"]
#batch size:
samples = config["samples_batch"]
# evaluation frequency (in terms of iterations number)
evaluate_every = np.int64(config["evaluate_every"])
# optionally it is possible to load a checkpoint
load_checkpoint = config["load_checkpoint"] # set to true to continue from a checkpoint
checkpoint_path = config["checkpoint_path"]
# a data folder that contains folders named "[video_name]","[video_name]_flow","[video_name]_maskrcnn" (optionally)
data_folder = Path(args.vid_path)
# results_folder_name = config["results_folder_name"] # the folder (under the code's folder where the experiments will be saved.
results_folder_name = "results"
# add_to_experiment_folder_name = config["add_to_experiment_folder_name"] # for each experiment folder (saved inside "results_folder_name") add this string
# boolean variables for determining if a pretraining is used:
pretrain_mapping1 = config["pretrain_mapping1"]
pretrain_iter_number = config["pretrain_iter_number"]
# the scale of the atlas uv coordinates relative to frame's xy coordinates
uv_mapping_scale = config["uv_mapping_scale"]
# M_f's hyper parameters
use_positional_encoding_mapping1 = config["use_positional_encoding_mapping1"]
number_of_positional_encoding_mapping1 = config["number_of_positional_encoding_mapping1"]
number_of_layers_mapping1 = config["number_of_layers_mapping1"]
number_of_channels_mapping1 = config["number_of_channels_mapping1"]
# Atlas MLP's hyper parameters
number_of_channels_atlas = config["number_of_channels_atlas"]
number_of_layers_atlas = config["number_of_layers_atlas"]
positional_encoding_num_atlas = config[
"positional_encoding_num_atlas"]
# coefficients for the different loss terms
rgb_coeff = config["rgb_coeff"] # coefficient for rgb loss term:
# optical flow loss term coefficient (beta_f in the paper):
optical_flow_coeff = config["optical_flow_coeff"]
use_gradient_loss = config["use_gradient_loss"]
gradient_loss_coeff = config["gradient_loss_coeff"]
rigidity_coeff = config["rigidity_coeff"] # coefficient for the rigidity loss term
derivative_amount = config["derivative_amount"] # For finite differences gradient computation:
# for using global (in addition to the current local) rigidity loss:
include_global_rigidity_loss = config["include_global_rigidity_loss"]
# Finite differences parameters for the global rigidity terms:
global_rigidity_derivative_amount_fg = config["global_rigidity_derivative_amount_fg"]
global_rigidity_coeff_fg = config["global_rigidity_coeff_fg"]
stop_global_rigidity = config["stop_global_rigidity"]
use_optical_flow = True
vid_name = data_folder.name
vid_root = data_folder.parent
results_folder = Path(
f'./{results_folder_name}/{vid_name}/stage_1')
results_folder.mkdir(parents=True, exist_ok=True)
with open('%s/config.json' % results_folder, 'w') as json_file:
json.dump(config, json_file, indent=4)
writer = SummaryWriter(log_dir=str(results_folder))
optical_flows_mask, video_frames, optical_flows_reverse_mask, mask_frames, video_frames_dx, video_frames_dy, optical_flows_reverse, optical_flows = load_input_data_single(
resy, resx, maximum_number_of_frames, data_folder, True, True, vid_root, vid_name)
number_of_frames=video_frames.shape[3]
# save a video showing the masked part of the forward optical flow:s | save_mask_flow(optical_flows_mask, video_frames, results_folder) | 8 | 2023-10-15 12:15:01+00:00 | 12k |
sotopia-lab/sotopia | sotopia-chat/chat_server.py | [
{
"identifier": "redis_agent",
"path": "sotopia/agents/redis_agent.py",
"snippet": "class RedisAgent(BaseAgent[Observation, AgentAction]):\n def __init__(\n self,\n agent_name: str | None = None,\n uuid_str: str | None = None,\n session_id: str | None = None,\n agen... | import asyncio
import logging
import os
import random
import subprocess
import redis.asyncio as redis
import typer
from asyncio import gather
from asyncio import run as aiorun
from datetime import datetime
from logging import FileHandler
from typing import Literal, cast
from rich.logging import RichHandler
from sotopia.agents import redis_agent
from sotopia.agents.llm_agent import LLMAgent
from sotopia.database import EnvAgentComboStorage
from sotopia.database.persistent_profile import (
AgentProfile,
EnvironmentList,
EnvironmentProfile,
)
from sotopia.envs.evaluators import (
ReachGoalLLMEvaluator,
RuleBasedTerminatedEvaluator,
)
from sotopia.envs.parallel import ParallelSotopiaEnv
from sotopia.server import arun_one_episode | 9,170 |
process = subprocess.Popen(
["git", "rev-parse", "HEAD"], shell=False, stdout=subprocess.PIPE
)
git_head_hash = process.communicate()[0].strip()
FORMAT = "%(asctime)s - %(levelname)s - %(name)s - %(message)s"
logging.basicConfig(
level=15,
format=FORMAT,
datefmt="[%X]",
handlers=[
RichHandler(),
FileHandler(
datetime.now().strftime(
f"./logs/%H_%M_%d_%m_%Y_{str(git_head_hash.decode('utf-8'))}.log"
)
),
],
)
app = typer.Typer()
async def _start_server_with_two_session_ids_and_agent_env_combo(
session_ids: list[str], agent_env_combo_pk: str
) -> None:
env_agent_combo_storage = EnvAgentComboStorage.get(agent_env_combo_pk)
env = ParallelSotopiaEnv(
env_profile=EnvironmentProfile.get(env_agent_combo_storage.env_id),
model_name="gpt-4",
action_order="round-robin",
evaluators=[
RuleBasedTerminatedEvaluator(max_turn_number=20, max_stale_turn=2),
],
terminal_evaluators=[
ReachGoalLLMEvaluator("gpt-4"),
],
)
random.shuffle(session_ids)
agents = [
redis_agent.RedisAgent(
agent_profile=AgentProfile.get(
env_agent_combo_storage.agent_ids[idx]
),
session_id=session_id,
)
for idx, session_id in enumerate(session_ids)
]
await arun_one_episode(
env,
agents,
{"env": "gpt-4", "agent1": "redis", "agent2": "redis"},
tag="human_human_v0.0.3_dryrun",
push_to_db=True,
)
async def _start_server_with_one_session_id_and_agent_env_combo(
session_id: str,
agent_env_combo_pk: str,
left_or_right: Literal["left", "right"],
) -> None:
env_agent_combo_storage = EnvAgentComboStorage.get(agent_env_combo_pk)
env = ParallelSotopiaEnv(
env_profile=EnvironmentProfile.get(env_agent_combo_storage.env_id),
model_name="gpt-4",
action_order="round-robin",
evaluators=[
RuleBasedTerminatedEvaluator(max_turn_number=20, max_stale_turn=2),
],
terminal_evaluators=[
ReachGoalLLMEvaluator("gpt-4"),
],
)
agents = (
[
redis_agent.RedisAgent(
agent_profile=AgentProfile.get(
env_agent_combo_storage.agent_ids[0]
),
session_id=session_id,
),
|
process = subprocess.Popen(
["git", "rev-parse", "HEAD"], shell=False, stdout=subprocess.PIPE
)
git_head_hash = process.communicate()[0].strip()
FORMAT = "%(asctime)s - %(levelname)s - %(name)s - %(message)s"
logging.basicConfig(
level=15,
format=FORMAT,
datefmt="[%X]",
handlers=[
RichHandler(),
FileHandler(
datetime.now().strftime(
f"./logs/%H_%M_%d_%m_%Y_{str(git_head_hash.decode('utf-8'))}.log"
)
),
],
)
app = typer.Typer()
async def _start_server_with_two_session_ids_and_agent_env_combo(
session_ids: list[str], agent_env_combo_pk: str
) -> None:
env_agent_combo_storage = EnvAgentComboStorage.get(agent_env_combo_pk)
env = ParallelSotopiaEnv(
env_profile=EnvironmentProfile.get(env_agent_combo_storage.env_id),
model_name="gpt-4",
action_order="round-robin",
evaluators=[
RuleBasedTerminatedEvaluator(max_turn_number=20, max_stale_turn=2),
],
terminal_evaluators=[
ReachGoalLLMEvaluator("gpt-4"),
],
)
random.shuffle(session_ids)
agents = [
redis_agent.RedisAgent(
agent_profile=AgentProfile.get(
env_agent_combo_storage.agent_ids[idx]
),
session_id=session_id,
)
for idx, session_id in enumerate(session_ids)
]
await arun_one_episode(
env,
agents,
{"env": "gpt-4", "agent1": "redis", "agent2": "redis"},
tag="human_human_v0.0.3_dryrun",
push_to_db=True,
)
async def _start_server_with_one_session_id_and_agent_env_combo(
session_id: str,
agent_env_combo_pk: str,
left_or_right: Literal["left", "right"],
) -> None:
env_agent_combo_storage = EnvAgentComboStorage.get(agent_env_combo_pk)
env = ParallelSotopiaEnv(
env_profile=EnvironmentProfile.get(env_agent_combo_storage.env_id),
model_name="gpt-4",
action_order="round-robin",
evaluators=[
RuleBasedTerminatedEvaluator(max_turn_number=20, max_stale_turn=2),
],
terminal_evaluators=[
ReachGoalLLMEvaluator("gpt-4"),
],
)
agents = (
[
redis_agent.RedisAgent(
agent_profile=AgentProfile.get(
env_agent_combo_storage.agent_ids[0]
),
session_id=session_id,
), | LLMAgent( | 1 | 2023-10-23 19:47:26+00:00 | 12k |
Qualcomm-AI-research/geometric-algebra-transformer | gatr/experiments/nbody/wrappers.py | [
{
"identifier": "GCAGNN",
"path": "gatr/baselines/gcan.py",
"snippet": "class GCAGNN(nn.Module):\n \"\"\"GCA-GNN model as described in D. Ruhe et al.\n\n The model was described in \"Geometric Clifford Algebra Networks\" by D.Ruhe et al.,\n and in private communication from D. Ruhe.\n\n Comb... | import dgl
import numpy as np
import torch
from e3nn.o3 import Irreps, spherical_harmonics
from torch import nn
from torch_geometric.data import Data
from torch_geometric.nn import knn_graph
from torch_scatter import scatter
from gatr.baselines.gcan import GCAGNN
from gatr.baselines.transformer import BaselineAxialTransformer, BaselineTransformer
from gatr.experiments.base_wrapper import BaseWrapper
from gatr.interface import (
embed_point,
embed_scalar,
embed_translation,
extract_point,
extract_point_embedding_reg,
)
from gatr.utils.misc import make_full_edge_index | 7,294 | SE3-Transformer model.
"""
def __init__(self, net, canonicalize_to_com=True, canonicalize_mode="com"):
super().__init__()
self.net = net
self.canonicalize_to_com = canonicalize_to_com
self.canonicalize_mode = canonicalize_mode
self.supports_variable_items = True
def forward(self, inputs):
"""Wrapped forward pass.
Parameters
----------
inputs : torch.Tensor
Raw inputs, as given by dataset.
Returns
-------
outputs : torch.Tensor
Raw outputs, as expected in dataset.
other : torch.Tensor
Dummy term, since the baselines do not require regularization.
Raises
------
ValueError
If `self.canonicalize_mode` is invalid.
"""
batchsize, num_objects, _ = inputs.shape
# Separate into scalars and vectors
masses = inputs[:, :, [0]] # (batchsize, objects, 1)
locations = inputs[:, :, 1:4] # (batchsize, objects, 3)
velocities = inputs[:, :, 4:7] # (batchsize, objects, 3)
# Canonicalize to center-of-mass frame if requested
if self.canonicalize_to_com:
if self.canonicalize_mode == "com":
weights = masses
elif self.canonicalize_mode == "heaviest":
weights = torch.exp(2.0 * masses.double())
else:
raise ValueError(f"Unknown canonicalization mode {self.canonicalize_mode}")
com = torch.sum(
weights / torch.sum(weights, dim=-2, keepdim=True) * locations.double(),
dim=-2,
keepdim=True,
).float()
locations = locations - com
else:
com = torch.zeros_like(locations)
# Represent as graph
graphs = self._build_graphs(locations, velocities, masses)
# Push through model
predictions = self.net(graphs)
predictions = predictions[:, 0, :] # Only positions, not velocities
predictions = predictions.view(batchsize, num_objects, 3)
predictions = (
locations + predictions
) # Model predicts positions relative to initial pos, make it absolute
# Undo canonicalization
if self.canonicalize_to_com:
predictions = predictions + com
return predictions, torch.zeros(batchsize, device=inputs.device)
def _build_graphs(self, locations, velocities, masses):
"""Builds graph for a full batch."""
graphs = [
self._build_graph(loc, vel, m) for loc, vel, m in zip(locations, velocities, masses)
]
graphs = dgl.batch(graphs)
return graphs
def _build_graph(self, locations, velocities, masses):
"""Builds graph for a single sample."""
n_points = len(locations)
indices_src, indices_dst = self._fully_connected_idx(n_points)
graph = dgl.DGLGraph((indices_src, indices_dst)).to(locations.device)
graph.ndata["x"] = torch.unsqueeze(locations, dim=1) # [N, 1, 3]
graph.ndata["v"] = torch.unsqueeze(velocities, dim=1) # [N, 1, 3]
graph.ndata["c"] = torch.unsqueeze(masses, dim=1) # [N, 1, 1]
graph.edata["d"] = locations[indices_dst] - locations[indices_src] # relative postions
graph.edata["w"] = masses[indices_dst] * masses[indices_src]
return graph
@staticmethod
def _fully_connected_idx(num_atoms):
"""Creates source and destination indices for a fully connected graph."""
src = []
dst = []
for i in range(num_atoms):
for j in range(num_atoms):
if i != j:
src.append(i)
dst.append(j)
return np.array(src), np.array(dst)
class NBodyGCANWrapper(nn.Module):
"""Wraps around GCA-MLP and GCA-GNN baselines for the n-body experiment.
Parameters
----------
net : torch.nn.Module
GCAN model that accepts inputs with multivector inputs with 1 channel and
returns multivector outputs with 1 channel.
"""
def __init__(self, net, geometric_batching=False):
super().__init__()
self.net = net
self._geometric_batching = geometric_batching
| # Copyright (c) 2023 Qualcomm Technologies, Inc.
# All rights reserved.
def embed_nbody_data_in_pga(inputs):
"""Represent the n-body initial state in PGA multivectors.
Masses are represented as scalars, positions as trivectors, and velocities as bivectors
(like translations). All three are summed (this is equivalent to concatenation, as an equi
linear layer can easily separate the grades again).
This function is used both by the GATr and by the GCAN wrappers.
Parameters
----------
inputs : torch.Tensor with shape (batchsize, objects, 7)
n-body initial state: a concatenation of masses, initial positions, and initial
velocities along the feature dimension.
Returns
-------
multivector : torch.Tensor with shape (batchsize, objects, 1, 16)
GA embedding.
"""
# Build one multivector holding masses, points, and velocities for each object
masses = inputs[:, :, [0]] # (batchsize, objects, 1)
masses = embed_scalar(masses) # (batchsize, objects, 16)
points = inputs[:, :, 1:4] # (batchsize, objects, 3)
points = embed_point(points) # (batchsize, objects, 16)
velocities = inputs[:, :, 4:7] # (batchsize, objects, 3)
velocities = embed_translation(velocities) # (batchsize, objects, 16)
multivector = masses + points + velocities # (batchsize, objects, 16)
# Insert channel dimension
multivector = multivector.unsqueeze(2) # (batchsize, objects, 1, 16)
return multivector
class NBodyGATrWrapper(BaseWrapper):
"""Wraps around GATr for the n-body prediction experiment.
Parameters
----------
net : torch.nn.Module
GATr model that accepts inputs with 1 multivector channel and 1 scalar channel, and
returns outputs with 1 multivector channel and 1 scalar channel.
"""
def __init__(self, net):
super().__init__(net, scalars=True, return_other=True)
self.supports_variable_items = True
def embed_into_ga(self, inputs):
"""Embeds raw inputs into the geometric algebra (+ scalar) representation.
Parameters
----------
inputs : torch.Tensor with shape (batchsize, objects, 7)
n-body initial state: a concatenation of masses, initial positions, and initial
velocities along the feature dimension.
Returns
-------
mv_inputs : torch.Tensor
Multivector representation of masses, positions, and velocities.
scalar_inputs : torch.Tensor or None
Dummy auxiliary scalars, containing no information.
"""
batchsize, num_objects, _ = inputs.shape
# Build one multivector holding masses, positions, and velocities for each object
multivector = embed_nbody_data_in_pga(inputs)
# Scalar inputs are not really needed here
scalars = torch.zeros((batchsize, num_objects, 1), device=inputs.device)
return multivector, scalars
def extract_from_ga(self, multivector, scalars):
"""Extracts raw outputs from the GATr multivector + scalar outputs.
We parameterize the predicted final positions as points.
Parameters
----------
multivector : torch.Tensor
Multivector outputs from GATr.
scalars : torch.Tensor or None
Scalar outputs from GATr.
Returns
-------
outputs : torch.Tensor
Predicted final-state positions.
other : torch.Tensor
Regularization terms.
"""
# Check channels of inputs. Batchsize and object numbers are free.
assert multivector.shape[2:] == (1, 16)
assert scalars.shape[2:] == (1,)
# Extract position
points = extract_point(multivector[:, :, 0, :])
# Extract non-point components and compute regularization
other = extract_point_embedding_reg(multivector[:, :, 0, :])
reg = torch.sum(other**2, dim=[1, 2])
if self.scalars:
reg = reg + torch.sum(scalars**2, dim=[1, 2])
return points, reg
class NBodyBaselineWrapper(nn.Module):
"""Wraps around simple baselines (MLP or Transformer) for the n-body prediction experiment.
Parameters
----------
net : torch.nn.Module
Model that accepts inputs with 7 channels and returns outputs with 3 channels.
"""
def __init__(self, net):
super().__init__()
self.net = net
self.supports_variable_items = isinstance(
net, (BaselineTransformer, BaselineAxialTransformer)
)
def forward(self, inputs):
"""Wrapped forward pass.
Parameters
----------
inputs : torch.Tensor
Raw inputs, as given by dataset.
Returns
-------
outputs : torch.Tensor
Raw outputs, as expected in dataset.
other : torch.Tensor
Dummy term, since the baselines do not require regularization.
"""
batchsize = inputs.shape[0]
return self.net(inputs), torch.zeros(batchsize, device=inputs.device)
class NBodySEGNNWrapper(nn.Module):
"""Wraps around the SEGNN baseline for the n-body prediction experiment.
Parameters
----------
net : torch.nn.Module
SEGNN model that accepts inputs with inputs with 2 vector channels and 1 scalar channel,
and returns outputs with 1 vector channel.
"""
def __init__(self, net, neighbors, lmax_attr, canonicalize_mode="com"):
super().__init__()
self.net = net
self.canonicalize_mode = canonicalize_mode
self.neighbors = neighbors
self.transform_attr_irreps = Irreps.spherical_harmonics(lmax_attr)
self.supports_variable_items = True
def forward(self, inputs):
"""Wrapped forward pass.
Parameters
----------
inputs : torch.Tensor
Raw inputs, as given by dataset.
Returns
-------
outputs : torch.Tensor
Raw outputs, as expected in dataset.
other : torch.Tensor
Dummy term, since the baselines do not require regularization.
Raises
------
ValueError
If `self.canonicalize_mode` is invalid.
"""
batchsize, num_objects, _ = inputs.shape
# Separate into scalars and vectors
masses = inputs[:, :, [0]] # (batchsize, objects, 1)
locations = inputs[:, :, 1:4] # (batchsize, objects, 3)
velocities = inputs[:, :, 4:7] # (batchsize, objects, 3)
# Canonicalize
if self.canonicalize_mode == "com":
weights = masses
elif self.canonicalize_mode == "heaviest":
weights = torch.exp(2.0 * masses.double())
elif self.canonicalize_mode == "even":
weights = torch.ones_like(masses)
else:
raise ValueError(f"Unknown canonicalization mode {self.canonicalize_mode}")
com = torch.sum(
weights / torch.sum(weights, dim=-2, keepdim=True) * locations.double(),
dim=-2,
keepdim=True,
).float()
locations = locations - com
# Represent as graph
graph = Data(pos=locations.view(-1, 3), vel=velocities.view(-1, 3), mass=masses.view(-1, 1))
batch = torch.arange(0, batchsize, device=inputs.device)
graph.batch = batch.repeat_interleave(num_objects).to(inputs.device, torch.long)
graph.edge_index = knn_graph(locations.view(-1, 3), self.neighbors, graph.batch)
graph = self._augment_gravity_graph(graph) # Add O3 attributes
# Push through model
pred_shift = self.net(graph)
pred_shift = pred_shift.view(batchsize, num_objects, 3)
predictions = (
locations + pred_shift
) # The model predicts the shift, not the final positions
# Undo canonicalization
predictions = predictions + com
return predictions, torch.zeros(batchsize, device=inputs.device)
def _augment_gravity_graph(self, graph):
"""SEGNN feature engineering for n-body experiments.
Constructs node features (position relative to mean position, velocity embedding, absolute
velocity) and edge features (pairwise distances, product of charges / masses).
"""
pos = graph.pos
vel = graph.vel
mass = graph.mass
prod_mass = mass[graph.edge_index[0]] * mass[graph.edge_index[1]]
rel_pos = pos[graph.edge_index[0]] - pos[graph.edge_index[1]]
edge_dist = torch.sqrt(rel_pos.pow(2).sum(1, keepdims=True))
graph.edge_attr = spherical_harmonics(
self.transform_attr_irreps, rel_pos, normalize=True, normalization="integral"
)
vel_embedding = spherical_harmonics(
self.transform_attr_irreps, vel, normalize=True, normalization="integral"
)
graph.node_attr = (
scatter(graph.edge_attr, graph.edge_index[1], dim=0, reduce="mean") + vel_embedding
)
vel_abs = torch.sqrt(vel.pow(2).sum(1, keepdims=True))
graph.x = torch.cat((pos, vel, vel_abs), 1) # Note that pos is here already canonicalized
graph.additional_message_features = torch.cat((edge_dist, prod_mass), dim=-1)
return graph
class NBodySE3TransformerWrapper(nn.Module):
"""Wraps around the SE3-Transformer baseline for the n-body prediction experiment.
Parameters
----------
net : torch.nn.Module
SE3-Transformer model.
"""
def __init__(self, net, canonicalize_to_com=True, canonicalize_mode="com"):
super().__init__()
self.net = net
self.canonicalize_to_com = canonicalize_to_com
self.canonicalize_mode = canonicalize_mode
self.supports_variable_items = True
def forward(self, inputs):
"""Wrapped forward pass.
Parameters
----------
inputs : torch.Tensor
Raw inputs, as given by dataset.
Returns
-------
outputs : torch.Tensor
Raw outputs, as expected in dataset.
other : torch.Tensor
Dummy term, since the baselines do not require regularization.
Raises
------
ValueError
If `self.canonicalize_mode` is invalid.
"""
batchsize, num_objects, _ = inputs.shape
# Separate into scalars and vectors
masses = inputs[:, :, [0]] # (batchsize, objects, 1)
locations = inputs[:, :, 1:4] # (batchsize, objects, 3)
velocities = inputs[:, :, 4:7] # (batchsize, objects, 3)
# Canonicalize to center-of-mass frame if requested
if self.canonicalize_to_com:
if self.canonicalize_mode == "com":
weights = masses
elif self.canonicalize_mode == "heaviest":
weights = torch.exp(2.0 * masses.double())
else:
raise ValueError(f"Unknown canonicalization mode {self.canonicalize_mode}")
com = torch.sum(
weights / torch.sum(weights, dim=-2, keepdim=True) * locations.double(),
dim=-2,
keepdim=True,
).float()
locations = locations - com
else:
com = torch.zeros_like(locations)
# Represent as graph
graphs = self._build_graphs(locations, velocities, masses)
# Push through model
predictions = self.net(graphs)
predictions = predictions[:, 0, :] # Only positions, not velocities
predictions = predictions.view(batchsize, num_objects, 3)
predictions = (
locations + predictions
) # Model predicts positions relative to initial pos, make it absolute
# Undo canonicalization
if self.canonicalize_to_com:
predictions = predictions + com
return predictions, torch.zeros(batchsize, device=inputs.device)
def _build_graphs(self, locations, velocities, masses):
"""Builds graph for a full batch."""
graphs = [
self._build_graph(loc, vel, m) for loc, vel, m in zip(locations, velocities, masses)
]
graphs = dgl.batch(graphs)
return graphs
def _build_graph(self, locations, velocities, masses):
"""Builds graph for a single sample."""
n_points = len(locations)
indices_src, indices_dst = self._fully_connected_idx(n_points)
graph = dgl.DGLGraph((indices_src, indices_dst)).to(locations.device)
graph.ndata["x"] = torch.unsqueeze(locations, dim=1) # [N, 1, 3]
graph.ndata["v"] = torch.unsqueeze(velocities, dim=1) # [N, 1, 3]
graph.ndata["c"] = torch.unsqueeze(masses, dim=1) # [N, 1, 1]
graph.edata["d"] = locations[indices_dst] - locations[indices_src] # relative postions
graph.edata["w"] = masses[indices_dst] * masses[indices_src]
return graph
@staticmethod
def _fully_connected_idx(num_atoms):
"""Creates source and destination indices for a fully connected graph."""
src = []
dst = []
for i in range(num_atoms):
for j in range(num_atoms):
if i != j:
src.append(i)
dst.append(j)
return np.array(src), np.array(dst)
class NBodyGCANWrapper(nn.Module):
"""Wraps around GCA-MLP and GCA-GNN baselines for the n-body experiment.
Parameters
----------
net : torch.nn.Module
GCAN model that accepts inputs with multivector inputs with 1 channel and
returns multivector outputs with 1 channel.
"""
def __init__(self, net, geometric_batching=False):
super().__init__()
self.net = net
self._geometric_batching = geometric_batching | self.supports_variable_items = isinstance(net, GCAGNN) | 0 | 2023-10-23 15:58:36+00:00 | 12k |
tomguluson92/cloth2tex | phase1_inference.py | [
{
"identifier": "ClothRenderer",
"path": "renderer/cloth_renderer.py",
"snippet": "class ClothRenderer(object):\n \n def __init__(self, objfile, resolution=512, focal_distance=1.6, scale_factor=1):\n self.device = torch.device(\"cuda:0\")\n\n self.img_size = resolution\n self.... | import argparse
import datetime
import torch
import torchvision
import torch.nn as nn
import torch.optim as optim
import numpy as np
import pickle
import os
import os.path as osp
import torchvision
import torchvision.transforms as transforms
import torch.nn.functional as F
import thinplate as tps
import time
import matplotlib.pyplot as plt
import importlib
import random
import json
import cv2
from torchvision.models.feature_extraction import create_feature_extractor, get_graph_node_names
from renderer.cloth_renderer import ClothRenderer
from PIL import Image
from utils.frequency import extract_ampl_phase
from utils.binary_function import Binarize
from utils.tvl_loss import TVLoss, TVMaskLoss
from tqdm import tqdm
from pytorch3d.io import load_obj, save_obj
from itertools import chain
from pytorch3d.structures import Meshes
from pytorch3d.transforms import RotateAxisAngle
from pytorch3d.loss import (
mesh_edge_loss,
mesh_laplacian_smoothing,
mesh_normal_consistency,
)
from lib.deformation_graph import DeformationGraph
from lib.mesh_sampling import generate_transform_matrices_coma
from lib.utils_dg import to_edge_index, to_sparse, get_vert_connectivity, scipy_to_torch_sparse
from models import DeformGraphModel
from torch_geometric.transforms import FaceToEdge
from torch_geometric.data import Data
from psbody.mesh import Mesh
from torch_geometric.io import read_ply | 8,630 | # -*- coding: utf-8 -*-
"""
@date: 2023.03.29-31 week13
@func: PhaseI inference code.
"""
class Trainer(object):
def __init__(self, objfile, savedir, resolution=512, focal_distance=2, verts_num=9648, scale_factor=1.0):
self.device = torch.device("cuda")
#set mesh and visualizer----------------------
| # -*- coding: utf-8 -*-
"""
@date: 2023.03.29-31 week13
@func: PhaseI inference code.
"""
class Trainer(object):
def __init__(self, objfile, savedir, resolution=512, focal_distance=2, verts_num=9648, scale_factor=1.0):
self.device = torch.device("cuda")
#set mesh and visualizer---------------------- | self.cloth_renderer = ClothRenderer(objfile, resolution, focal_distance, scale_factor) | 0 | 2023-10-17 11:30:53+00:00 | 12k |
uukuguy/multi_loras | multi_loras/slora/router/model_infer/model_rpc.py | [
{
"identifier": "sample",
"path": "multi_loras/slora/router/model_infer/post_process.py",
"snippet": "def sample(logits, batch:InferBatch):\n logits = logits.contiguous()\n presence_penalties, frequency_penalties, temperatures, top_ps, top_ks, p_token_ids, p_token_counts, p_cumsum_seq_len, p_max_l... | import asyncio
import numpy as np
import rpyc
import torch
import traceback
import time
import torch
import torch.distributed as dist
import torch
import torch.distributed as dist
import multiprocessing
from collections import defaultdict
from datetime import timedelta
from tqdm import tqdm
from typing import Dict, List, Tuple
from rpyc.utils.classic import obtain
from transformers.configuration_utils import PretrainedConfig
from .post_process import sample
from .infer_batch import InferBatch
from .infer_adapter import InferAdapter
from .naive_infer_adapter import NaiveInferAdapter
from slora.common.configs.config import setting
from slora.models.llama.model import LlamaTpPartModel
from slora.models.llama2.model import Llama2TpPartModel
from slora.models.peft.lora_adapter import LoraTpPartAdapter
from slora.models.peft.lora_unordered_batch_infer import LoraUnorderedBatchInfer
from slora.models.peft.lora_single_batch_infer import LoraPEFTBatchInfer
from slora.models.bmm.lora_bmm_infer import LoraBmmInfer
from slora.utils.infer_utils import set_random_seed
from slora.utils.infer_utils import calculate_time, mark_start, mark_end
from slora.utils.model_utils import get_model_config
from rpyc.utils.server import ThreadedServer | 9,874 |
class ModelRpcServer(rpyc.Service):
def exposed_init_model(self, rank_id, world_size, weight_dir, adapter_dirs,
max_total_token_num, load_way, mode, input_params,
prefetch_stream):
if world_size != 1:
trans_list = [obtain(e) for e in (rank_id, world_size, weight_dir, adapter_dirs,
max_total_token_num, load_way, mode)]
rank_id, world_size, weight_dir, adapter_dirs, max_total_token_num, load_way, mode = trans_list
self.tp_rank = rank_id
self.world_size = world_size
self.load_way = load_way
self.mode = mode
self.input_params = input_params
self.prefetch_stream = prefetch_stream
self.cache = {}
dist.init_process_group('nccl', init_method=f'tcp://127.0.0.1:{setting["nccl_port"]}', rank=rank_id, world_size=world_size)
torch.cuda.set_device(rank_id)
model_cfg = get_model_config(weight_dir, dummy=input_params.dummy)
try:
self.model_type = model_cfg["model_type"]
if self.model_type == "llama":
if "num_key_value_heads" in model_cfg.keys():
self.model = Llama2TpPartModel(rank_id, world_size, weight_dir,
max_total_token_num,
mem_adapter_size=input_params.pool_size_lora,
load_way=load_way, mode=mode,
dummy=input_params.dummy)
else:
self.model = LlamaTpPartModel(rank_id, world_size, weight_dir,
max_total_token_num,
mem_adapter_size=input_params.pool_size_lora,
load_way=load_way, mode=mode,
dummy=input_params.dummy)
else:
raise Exception(f"can not support {self.model_type} now")
except Exception as e:
print("#" * 16)
print("load model error:", str(e), e, type(e))
raise e
''' init adapters '''
# TODO support TP for adapters
# print("adapter_dirs", adapter_dirs)
self.adapters = []
self.adapter_id = {}
for adapter_dir in tqdm(adapter_dirs, desc="load adapters"):
self.adapter_id[adapter_dir] = len(self.adapters)
self.adapters.append(LoraTpPartAdapter(rank_id, world_size, adapter_dir, model_cfg,
swap=input_params.swap, dummy=input_params.dummy,
no_lora_swap=input_params.no_lora_swap,
prefetch_stream=prefetch_stream))
self.adapter_id[None] = len(self.adapters)
self.adapters.append(None)
if input_params.no_mem_pool:
head_num = self.model.config["num_attention_heads"]
self.infer_adapter = NaiveInferAdapter.init(self.model.config["num_hidden_layers"],
head_num,
self.model.config["hidden_size"] // head_num)
else:
|
class ModelRpcServer(rpyc.Service):
def exposed_init_model(self, rank_id, world_size, weight_dir, adapter_dirs,
max_total_token_num, load_way, mode, input_params,
prefetch_stream):
if world_size != 1:
trans_list = [obtain(e) for e in (rank_id, world_size, weight_dir, adapter_dirs,
max_total_token_num, load_way, mode)]
rank_id, world_size, weight_dir, adapter_dirs, max_total_token_num, load_way, mode = trans_list
self.tp_rank = rank_id
self.world_size = world_size
self.load_way = load_way
self.mode = mode
self.input_params = input_params
self.prefetch_stream = prefetch_stream
self.cache = {}
dist.init_process_group('nccl', init_method=f'tcp://127.0.0.1:{setting["nccl_port"]}', rank=rank_id, world_size=world_size)
torch.cuda.set_device(rank_id)
model_cfg = get_model_config(weight_dir, dummy=input_params.dummy)
try:
self.model_type = model_cfg["model_type"]
if self.model_type == "llama":
if "num_key_value_heads" in model_cfg.keys():
self.model = Llama2TpPartModel(rank_id, world_size, weight_dir,
max_total_token_num,
mem_adapter_size=input_params.pool_size_lora,
load_way=load_way, mode=mode,
dummy=input_params.dummy)
else:
self.model = LlamaTpPartModel(rank_id, world_size, weight_dir,
max_total_token_num,
mem_adapter_size=input_params.pool_size_lora,
load_way=load_way, mode=mode,
dummy=input_params.dummy)
else:
raise Exception(f"can not support {self.model_type} now")
except Exception as e:
print("#" * 16)
print("load model error:", str(e), e, type(e))
raise e
''' init adapters '''
# TODO support TP for adapters
# print("adapter_dirs", adapter_dirs)
self.adapters = []
self.adapter_id = {}
for adapter_dir in tqdm(adapter_dirs, desc="load adapters"):
self.adapter_id[adapter_dir] = len(self.adapters)
self.adapters.append(LoraTpPartAdapter(rank_id, world_size, adapter_dir, model_cfg,
swap=input_params.swap, dummy=input_params.dummy,
no_lora_swap=input_params.no_lora_swap,
prefetch_stream=prefetch_stream))
self.adapter_id[None] = len(self.adapters)
self.adapters.append(None)
if input_params.no_mem_pool:
head_num = self.model.config["num_attention_heads"]
self.infer_adapter = NaiveInferAdapter.init(self.model.config["num_hidden_layers"],
head_num,
self.model.config["hidden_size"] // head_num)
else: | self.infer_adapter = InferAdapter.init(self.model.mem_manager, | 2 | 2023-10-16 02:39:47+00:00 | 12k |
MobileLLM/AutoDroid | droidbot/input_policy.py | [
{
"identifier": "UTG",
"path": "droidbot/utg.py",
"snippet": "class UTG(object):\n \"\"\"\n UI transition graph\n \"\"\"\n\n def __init__(self, device, app, random_input):\n self.logger = logging.getLogger(self.__class__.__name__)\n self.device = device\n self.app = app\... | import sys
import json
import re
import logging
import random
import yaml
import copy
import requests
import ast
import time
import tools
import pdb
import os
import traceback
import time
import time
import os
import time
import numpy as np
from abc import abstractmethod
from .input_event import *
from .utg import UTG
from .input_event import ScrollEvent
from query_lmql import prompt_llm_with_history
from xmlrpc.client import ServerProxy
from xmlrpclib import ServerProxy
from InstructorEmbedding import INSTRUCTOR
from sklearn.metrics.pairwise import cosine_similarity | 7,471 | # if current app is in background, bring it to foreground
component = self.app.get_package_name()
if self.app.get_main_activity():
component += "/%s" % self.app.get_main_activity()
return IntentEvent(Intent(suffix=component))
self.logger.info("Replaying %s" % event_path)
self.event_idx = curr_event_idx
self.num_replay_tries = 0
# return InputEvent.from_dict(event_dict["event"])
event = InputEvent.from_dict(event_dict["event"])
self.last_state = self.current_state
self.last_event = event
return event
time.sleep(5)
# raise InputInterruptedException("No more record can be replayed.")
def __update_utg(self):
self.utg.add_transition(self.last_event, self.last_state, self.current_state)
class ManualPolicy(UtgBasedInputPolicy):
"""
manually explore UFG
"""
def __init__(self, device, app):
super(ManualPolicy, self).__init__(device, app, False)
self.logger = logging.getLogger(self.__class__.__name__)
self.__first_event = True
def generate_event_based_on_utg(self):
"""
generate an event based on current UTG
@return: InputEvent
"""
if self.__first_event:
self.__first_event = False
self.logger.info("Trying to start the app...")
start_app_intent = self.app.get_start_intent()
return IntentEvent(intent=start_app_intent)
else:
return ManualEvent()
class TaskPolicy(UtgBasedInputPolicy):
def __init__(self, device, app, random_input, task, use_memory=True, debug_mode=False):
super(TaskPolicy, self).__init__(device, app, random_input)
self.logger = logging.getLogger(self.__class__.__name__)
self.task = task
self.__nav_target = None
self.__nav_num_steps = -1
self.__num_restarts = 0
self.__num_steps_outside = 0
self.__event_trace = ""
self.__missed_states = set()
self.__random_explore = random_input
self.__action_history = []
self.__thought_history = []
self.use_memory = use_memory
# if use_memory:
# self.memory = Memory(app_name=self.app.app_name, app_output_path=self.device.output_dir)
if self.use_memory:
self.similar_ele_path, self.similar_ele_function, self.similar_ele_statement = self.get_most_similar_element()
if not self.similar_ele_function:
self.use_memory = False
print('=============\nWarning: Did not find the memory of this app, the app memory is disabled\n=============')
else:
print(f'============\nFound element: {self.similar_ele_statement}\nPath: {self.similar_ele_path}\nFunction: {self.similar_ele_function}\n============')
self.state_ele_memory = {} # memorize some important states that contain elements of insight
def get_most_similar_element(self):
model = INSTRUCTOR('hkunlp/instructor-xl')
task_embedding = model.encode('task: ' + self.task).reshape(1, -1)
with open('memory/node_filtered_elements.json') as file:
ele_statements = json.load(file)
with open('memory/element_description.json') as file:
ele_functions = json.load(file)
with open('memory/embedded_elements_desc.json') as file:
embeddings = json.load(file)
app_name = self.device.output_dir.split('/')[-1]
if app_name not in embeddings.keys():
return None, None, None
app_embeddings = embeddings[app_name]
# similarities = {}
max_similarity, similar_ele_idx = -9999, -9999
for state_str, elements in app_embeddings.items():
# if the target element is in the first ui, no onclick is needed
# if ele_statements[app_name][state_str]['path'] == []:
# continue
# similarities[state_str] = []
for idx, ele in enumerate(elements):
if ele:
npele = np.array(ele).reshape(1, -1)
similarity = cosine_similarity(task_embedding, npele)[0][0]
else:
similarity = -9999
# similarities[state_str].append(similarity)
if similarity > max_similarity:
max_similarity = similarity
similar_ele_idx = idx
similar_state_str = state_str
similar_ele = ele_statements[app_name][similar_state_str]['elements'][similar_ele_idx]
similar_ele_path = ele_statements[app_name][similar_state_str]['path']
similar_ele_desc = ele_functions[app_name][similar_state_str][similar_ele_idx]
del model
return similar_ele_path, similar_ele_desc, similar_ele
def _scroll_to_top(self, scroller, all_views_for_mark, old_state=None):
prefix_scroll_event = []
if old_state is None:
old_state = self.current_state
for _ in range(MAX_SCROLL_NUM): # first scroll up to the top
| # from memory.memory_builder import Memory
os.environ["TOKENIZERS_PARALLELISM"] = "false"
# Max number of restarts
MAX_NUM_RESTARTS = 5
# Max number of steps outside the app
MAX_NUM_STEPS_OUTSIDE = 1000
MAX_NUM_STEPS_OUTSIDE_KILL = 1000
# Max number of replay tries
MAX_REPLY_TRIES = 5
# Some input event flags
EVENT_FLAG_STARTED = "+started"
EVENT_FLAG_START_APP = "+start_app"
EVENT_FLAG_STOP_APP = "+stop_app"
EVENT_FLAG_EXPLORE = "+explore"
EVENT_FLAG_NAVIGATE = "+navigate"
EVENT_FLAG_TOUCH = "+touch"
# Policy taxanomy
POLICY_NAIVE_DFS = "dfs_naive"
POLICY_GREEDY_DFS = "dfs_greedy"
POLICY_NAIVE_BFS = "bfs_naive"
POLICY_GREEDY_BFS = "bfs_greedy"
POLICY_REPLAY = "replay"
POLICY_MANUAL = "manual"
POLICY_MONKEY = "monkey"
POLICY_TASK = "task"
POLICY_NONE = "none"
POLICY_MEMORY_GUIDED = "memory_guided" # implemented in input_policy2
FINISHED = "task_completed"
MAX_SCROLL_NUM = 7
USE_LMQL = False
class InputInterruptedException(Exception):
pass
def safe_dict_get(view_dict, key, default=None):
return_itm = view_dict[key] if (key in view_dict) else default
if return_itm == None:
return_itm = ''
return return_itm
class InputPolicy(object):
"""
This class is responsible for generating events to stimulate more app behaviour
It should call AppEventManager.send_event method continuously
"""
def __init__(self, device, app):
self.logger = logging.getLogger(self.__class__.__name__)
self.device = device
self.app = app
self.action_count = 0
self.master = None
def start(self, input_manager):
"""
start producing events
:param input_manager: instance of InputManager
"""
self.action_count = 0
while input_manager.enabled and self.action_count < input_manager.event_count:
try:
# # make sure the first event is go to HOME screen
# # the second event is to start the app
# if self.action_count == 0 and self.master is None:
# event = KeyEvent(name="HOME")
# elif self.action_count == 1 and self.master is None:
# event = IntentEvent(self.app.get_start_intent())
if self.action_count == 0 and self.master is None:
event = KillAppEvent(app=self.app)
else:
event = self.generate_event(input_manager)
if event == FINISHED:
break
input_manager.add_event(event)
except KeyboardInterrupt:
break
except InputInterruptedException as e:
self.logger.warning("stop sending events: %s" % e)
break
# except RuntimeError as e:
# self.logger.warning(e.message)
# break
except Exception as e:
self.logger.warning("exception during sending events: %s" % e)
traceback.print_exc()
continue
self.action_count += 1
@abstractmethod
def generate_event(self, input_manager):
"""
generate an event
@return:
"""
pass
class NoneInputPolicy(InputPolicy):
"""
do not send any event
"""
def __init__(self, device, app):
super(NoneInputPolicy, self).__init__(device, app)
def generate_event(self):
"""
generate an event
@return:
"""
return None
class UtgBasedInputPolicy(InputPolicy):
"""
state-based input policy
"""
def __init__(self, device, app, random_input):
super(UtgBasedInputPolicy, self).__init__(device, app)
self.random_input = random_input
self.script = None
self.master = None
self.script_events = []
self.last_event = None
self.last_state = None
self.current_state = None
self.utg = UTG(device=device, app=app, random_input=random_input)
self.script_event_idx = 0
if self.device.humanoid is not None:
self.humanoid_view_trees = []
self.humanoid_events = []
def generate_event(self, input_manager):
"""
generate an event
@return:
"""
# Get current device state
self.current_state = self.device.get_current_state()
if self.current_state is None:
time.sleep(5)
return KeyEvent(name="BACK")
self.__update_utg()
# update last view trees for humanoid
if self.device.humanoid is not None:
self.humanoid_view_trees = self.humanoid_view_trees + [self.current_state.view_tree]
if len(self.humanoid_view_trees) > 4:
self.humanoid_view_trees = self.humanoid_view_trees[1:]
event = None
# if the previous operation is not finished, continue
if len(self.script_events) > self.script_event_idx:
event = self.script_events[self.script_event_idx].get_transformed_event(self)
self.script_event_idx += 1
# First try matching a state defined in the script
if event is None and self.script is not None:
operation = self.script.get_operation_based_on_state(self.current_state)
if operation is not None:
self.script_events = operation.events
# restart script
event = self.script_events[0].get_transformed_event(self)
self.script_event_idx = 1
if event is None:
old_state, event = self.generate_event_based_on_utg(input_manager)
time.sleep(3)
# update last events for humanoid
if self.device.humanoid is not None:
self.humanoid_events = self.humanoid_events + [event]
if len(self.humanoid_events) > 3:
self.humanoid_events = self.humanoid_events[1:]
self.last_state = self.current_state if old_state is None else old_state
self.last_event = event
return event
def __update_utg(self):
self.utg.add_transition(self.last_event, self.last_state, self.current_state)
@abstractmethod
def generate_event_based_on_utg(self, input_manager):
"""
generate an event based on UTG
:return: InputEvent
"""
pass
class UtgNaiveSearchPolicy(UtgBasedInputPolicy):
"""
depth-first strategy to explore UFG (old)
"""
def __init__(self, device, app, random_input, search_method):
super(UtgNaiveSearchPolicy, self).__init__(device, app, random_input)
self.logger = logging.getLogger(self.__class__.__name__)
self.explored_views = set()
self.state_transitions = set()
self.search_method = search_method
self.last_event_flag = ""
self.last_event_str = None
self.last_state = None
self.preferred_buttons = ["yes", "ok", "activate", "detail", "more", "access",
"allow", "check", "agree", "try", "go", "next"]
def generate_event_based_on_utg(self):
"""
generate an event based on current device state
note: ensure these fields are properly maintained in each transaction:
last_event_flag, last_touched_view, last_state, exploited_views, state_transitions
@return: InputEvent
"""
self.save_state_transition(self.last_event_str, self.last_state, self.current_state)
if self.device.is_foreground(self.app):
# the app is in foreground, clear last_event_flag
self.last_event_flag = EVENT_FLAG_STARTED
else:
number_of_starts = self.last_event_flag.count(EVENT_FLAG_START_APP)
# If we have tried too many times but the app is still not started, stop DroidBot
if number_of_starts > MAX_NUM_RESTARTS:
raise InputInterruptedException("The app cannot be started.")
# if app is not started, try start it
if self.last_event_flag.endswith(EVENT_FLAG_START_APP):
# It seems the app stuck at some state, and cannot be started
# just pass to let viewclient deal with this case
self.logger.info("The app had been restarted %d times.", number_of_starts)
self.logger.info("Trying to restart app...")
pass
else:
start_app_intent = self.app.get_start_intent()
self.last_event_flag += EVENT_FLAG_START_APP
self.last_event_str = EVENT_FLAG_START_APP
return IntentEvent(start_app_intent)
# select a view to click
view_to_touch = self.select_a_view(self.current_state)
# if no view can be selected, restart the app
if view_to_touch is None:
stop_app_intent = self.app.get_stop_intent()
self.last_event_flag += EVENT_FLAG_STOP_APP
self.last_event_str = EVENT_FLAG_STOP_APP
return IntentEvent(stop_app_intent)
view_to_touch_str = view_to_touch['view_str']
if view_to_touch_str.startswith('BACK'):
result = KeyEvent('BACK')
else:
result = TouchEvent(view=view_to_touch)
self.last_event_flag += EVENT_FLAG_TOUCH
self.last_event_str = view_to_touch_str
self.save_explored_view(self.current_state, self.last_event_str)
return result
def select_a_view(self, state):
"""
select a view in the view list of given state, let droidbot touch it
@param state: DeviceState
@return:
"""
views = []
for view in state.views:
if view['enabled'] and len(view['children']) == 0:
views.append(view)
if self.random_input:
random.shuffle(views)
# add a "BACK" view, consider go back first/last according to search policy
mock_view_back = {'view_str': 'BACK_%s' % state.foreground_activity,
'text': 'BACK_%s' % state.foreground_activity}
if self.search_method == POLICY_NAIVE_DFS:
views.append(mock_view_back)
elif self.search_method == POLICY_NAIVE_BFS:
views.insert(0, mock_view_back)
# first try to find a preferable view
for view in views:
view_text = view['text'] if view['text'] is not None else ''
view_text = view_text.lower().strip()
if view_text in self.preferred_buttons \
and (state.foreground_activity, view['view_str']) not in self.explored_views:
self.logger.info("selected an preferred view: %s" % view['view_str'])
return view
# try to find a un-clicked view
for view in views:
if (state.foreground_activity, view['view_str']) not in self.explored_views:
self.logger.info("selected an un-clicked view: %s" % view['view_str'])
return view
# if all enabled views have been clicked, try jump to another activity by clicking one of state transitions
if self.random_input:
random.shuffle(views)
transition_views = {transition[0] for transition in self.state_transitions}
for view in views:
if view['view_str'] in transition_views:
self.logger.info("selected a transition view: %s" % view['view_str'])
return view
# no window transition found, just return a random view
# view = views[0]
# self.logger.info("selected a random view: %s" % view['view_str'])
# return view
# DroidBot stuck on current state, return None
self.logger.info("no view could be selected in state: %s" % state.tag)
return None
def save_state_transition(self, event_str, old_state, new_state):
"""
save the state transition
@param event_str: str, representing the event cause the transition
@param old_state: DeviceState
@param new_state: DeviceState
@return:
"""
if event_str is None or old_state is None or new_state is None:
return
if new_state.is_different_from(old_state):
self.state_transitions.add((event_str, old_state.tag, new_state.tag))
def save_explored_view(self, state, view_str):
"""
save the explored view
@param state: DeviceState, where the view located
@param view_str: str, representing a view
@return:
"""
if not state:
return
state_activity = state.foreground_activity
self.explored_views.add((state_activity, view_str))
class UtgGreedySearchPolicy(UtgBasedInputPolicy):
"""
DFS/BFS (according to search_method) strategy to explore UFG (new)
"""
def __init__(self, device, app, random_input, search_method):
super(UtgGreedySearchPolicy, self).__init__(device, app, random_input)
self.logger = logging.getLogger(self.__class__.__name__)
self.search_method = search_method
self.preferred_buttons = ["yes", "ok", "activate", "detail", "more", "access",
"allow", "check", "agree", "try", "go", "next"]
self.__nav_target = None
self.__nav_num_steps = -1
self.__num_restarts = 0
self.__num_steps_outside = 0
self.__event_trace = ""
self.__missed_states = set()
self.__random_explore = False
def generate_event_based_on_utg(self, input_manager):
"""
generate an event based on current UTG
@return: InputEvent
"""
current_state = self.current_state
self.logger.info("Current state: %s" % current_state.state_str)
if current_state.state_str in self.__missed_states:
self.__missed_states.remove(current_state.state_str)
if current_state.get_app_activity_depth(self.app) < 0:
# If the app is not in the activity stack
start_app_intent = self.app.get_start_intent()
# It seems the app stucks at some state, has been
# 1) force stopped (START, STOP)
# just start the app again by increasing self.__num_restarts
# 2) started at least once and cannot be started (START)
# pass to let viewclient deal with this case
# 3) nothing
# a normal start. clear self.__num_restarts.
if self.__event_trace.endswith(EVENT_FLAG_START_APP + EVENT_FLAG_STOP_APP) \
or self.__event_trace.endswith(EVENT_FLAG_START_APP):
self.__num_restarts += 1
self.logger.info("The app had been restarted %d times.", self.__num_restarts)
else:
self.__num_restarts = 0
# pass (START) through
if not self.__event_trace.endswith(EVENT_FLAG_START_APP):
if self.__num_restarts > MAX_NUM_RESTARTS:
# If the app had been restarted too many times, enter random mode
msg = "The app had been restarted too many times. Entering random mode."
self.logger.info(msg)
self.__random_explore = True
else:
# Start the app
self.__event_trace += EVENT_FLAG_START_APP
self.logger.info("Trying to start the app...")
return IntentEvent(intent=start_app_intent)
elif current_state.get_app_activity_depth(self.app) > 0:
# If the app is in activity stack but is not in foreground
self.__num_steps_outside += 1
if self.__num_steps_outside > MAX_NUM_STEPS_OUTSIDE:
# If the app has not been in foreground for too long, try to go back
if self.__num_steps_outside > MAX_NUM_STEPS_OUTSIDE_KILL:
stop_app_intent = self.app.get_stop_intent()
go_back_event = IntentEvent(stop_app_intent)
else:
go_back_event = KeyEvent(name="BACK")
self.__event_trace += EVENT_FLAG_NAVIGATE
self.logger.info("Going back to the app...")
return go_back_event
else:
# If the app is in foreground
self.__num_steps_outside = 0
# Get all possible input events
possible_events = current_state.get_possible_input()
if self.random_input:
random.shuffle(possible_events)
if self.search_method == POLICY_GREEDY_DFS:
possible_events.append(KeyEvent(name="BACK"))
elif self.search_method == POLICY_GREEDY_BFS:
possible_events.insert(0, KeyEvent(name="BACK"))
# get humanoid result, use the result to sort possible events
# including back events
if self.device.humanoid is not None:
possible_events = self.__sort_inputs_by_humanoid(possible_events)
# If there is an unexplored event, try the event first
for input_event in possible_events:
if not self.utg.is_event_explored(event=input_event, state=current_state):
self.logger.info("Trying an unexplored event.")
self.__event_trace += EVENT_FLAG_EXPLORE
return input_event
target_state = self.__get_nav_target(current_state)
if target_state:
navigation_steps = self.utg.get_navigation_steps(from_state=current_state, to_state=target_state)
if navigation_steps and len(navigation_steps) > 0:
self.logger.info("Navigating to %s, %d steps left." % (target_state.state_str, len(navigation_steps)))
self.__event_trace += EVENT_FLAG_NAVIGATE
return navigation_steps[0][1]
if self.__random_explore:
self.logger.info("Trying random event.")
random.shuffle(possible_events)
return possible_events[0]
# If couldn't find a exploration target, stop the app
stop_app_intent = self.app.get_stop_intent()
self.logger.info("Cannot find an exploration target. Trying to restart app...")
self.__event_trace += EVENT_FLAG_STOP_APP
return IntentEvent(intent=stop_app_intent)
def __sort_inputs_by_humanoid(self, possible_events):
if sys.version.startswith("3"):
else:
proxy = ServerProxy("http://%s/" % self.device.humanoid)
request_json = {
"history_view_trees": self.humanoid_view_trees,
"history_events": [x.__dict__ for x in self.humanoid_events],
"possible_events": [x.__dict__ for x in possible_events],
"screen_res": [self.device.display_info["width"],
self.device.display_info["height"]]
}
result = json.loads(proxy.predict(json.dumps(request_json)))
new_idx = result["indices"]
text = result["text"]
new_events = []
# get rid of infinite recursive by randomizing first event
if not self.utg.is_state_reached(self.current_state):
new_first = random.randint(0, len(new_idx) - 1)
new_idx[0], new_idx[new_first] = new_idx[new_first], new_idx[0]
for idx in new_idx:
if isinstance(possible_events[idx], SetTextEvent):
possible_events[idx].text = text
new_events.append(possible_events[idx])
return new_events
def __get_nav_target(self, current_state):
# If last event is a navigation event
if self.__nav_target and self.__event_trace.endswith(EVENT_FLAG_NAVIGATE):
navigation_steps = self.utg.get_navigation_steps(from_state=current_state, to_state=self.__nav_target)
if navigation_steps and 0 < len(navigation_steps) <= self.__nav_num_steps:
# If last navigation was successful, use current nav target
self.__nav_num_steps = len(navigation_steps)
return self.__nav_target
else:
# If last navigation was failed, add nav target to missing states
self.__missed_states.add(self.__nav_target.state_str)
reachable_states = self.utg.get_reachable_states(current_state)
if self.random_input:
random.shuffle(reachable_states)
for state in reachable_states:
# Only consider foreground states
if state.get_app_activity_depth(self.app) != 0:
continue
# Do not consider missed states
if state.state_str in self.__missed_states:
continue
# Do not consider explored states
if self.utg.is_state_explored(state):
continue
self.__nav_target = state
navigation_steps = self.utg.get_navigation_steps(from_state=current_state, to_state=self.__nav_target)
if len(navigation_steps) > 0:
self.__nav_num_steps = len(navigation_steps)
return state
self.__nav_target = None
self.__nav_num_steps = -1
return None
class UtgReplayPolicy(InputPolicy):
"""
Replay DroidBot output generated by UTG policy
"""
def __init__(self, device, app, replay_output):
super(UtgReplayPolicy, self).__init__(device, app)
self.logger = logging.getLogger(self.__class__.__name__)
self.replay_output = replay_output
event_dir = os.path.join(replay_output, "events")
self.event_paths = sorted([os.path.join(event_dir, x) for x in
next(os.walk(event_dir))[2]
if x.endswith(".json")])
# skip HOME and start app intent
self.device = device
self.app = app
self.event_idx = 2
self.num_replay_tries = 0
self.utg = UTG(device=device, app=app, random_input=None)
self.last_event = None
self.last_state = None
self.current_state = None
def generate_event(self):
"""
generate an event based on replay_output
@return: InputEvent
"""
while self.event_idx < len(self.event_paths) and \
self.num_replay_tries < MAX_REPLY_TRIES:
self.num_replay_tries += 1
current_state = self.device.get_current_state()
if current_state is None:
time.sleep(5)
self.num_replay_tries = 0
return KeyEvent(name="BACK")
curr_event_idx = self.event_idx
self.__update_utg()
while curr_event_idx < len(self.event_paths):
event_path = self.event_paths[curr_event_idx]
with open(event_path, "r") as f:
curr_event_idx += 1
try:
event_dict = json.load(f)
except Exception as e:
self.logger.info("Loading %s failed" % event_path)
continue
if event_dict["start_state"] != current_state.state_str:
continue
if not self.device.is_foreground(self.app):
# if current app is in background, bring it to foreground
component = self.app.get_package_name()
if self.app.get_main_activity():
component += "/%s" % self.app.get_main_activity()
return IntentEvent(Intent(suffix=component))
self.logger.info("Replaying %s" % event_path)
self.event_idx = curr_event_idx
self.num_replay_tries = 0
# return InputEvent.from_dict(event_dict["event"])
event = InputEvent.from_dict(event_dict["event"])
self.last_state = self.current_state
self.last_event = event
return event
time.sleep(5)
# raise InputInterruptedException("No more record can be replayed.")
def __update_utg(self):
self.utg.add_transition(self.last_event, self.last_state, self.current_state)
class ManualPolicy(UtgBasedInputPolicy):
"""
manually explore UFG
"""
def __init__(self, device, app):
super(ManualPolicy, self).__init__(device, app, False)
self.logger = logging.getLogger(self.__class__.__name__)
self.__first_event = True
def generate_event_based_on_utg(self):
"""
generate an event based on current UTG
@return: InputEvent
"""
if self.__first_event:
self.__first_event = False
self.logger.info("Trying to start the app...")
start_app_intent = self.app.get_start_intent()
return IntentEvent(intent=start_app_intent)
else:
return ManualEvent()
class TaskPolicy(UtgBasedInputPolicy):
def __init__(self, device, app, random_input, task, use_memory=True, debug_mode=False):
super(TaskPolicy, self).__init__(device, app, random_input)
self.logger = logging.getLogger(self.__class__.__name__)
self.task = task
self.__nav_target = None
self.__nav_num_steps = -1
self.__num_restarts = 0
self.__num_steps_outside = 0
self.__event_trace = ""
self.__missed_states = set()
self.__random_explore = random_input
self.__action_history = []
self.__thought_history = []
self.use_memory = use_memory
# if use_memory:
# self.memory = Memory(app_name=self.app.app_name, app_output_path=self.device.output_dir)
if self.use_memory:
self.similar_ele_path, self.similar_ele_function, self.similar_ele_statement = self.get_most_similar_element()
if not self.similar_ele_function:
self.use_memory = False
print('=============\nWarning: Did not find the memory of this app, the app memory is disabled\n=============')
else:
print(f'============\nFound element: {self.similar_ele_statement}\nPath: {self.similar_ele_path}\nFunction: {self.similar_ele_function}\n============')
self.state_ele_memory = {} # memorize some important states that contain elements of insight
def get_most_similar_element(self):
model = INSTRUCTOR('hkunlp/instructor-xl')
task_embedding = model.encode('task: ' + self.task).reshape(1, -1)
with open('memory/node_filtered_elements.json') as file:
ele_statements = json.load(file)
with open('memory/element_description.json') as file:
ele_functions = json.load(file)
with open('memory/embedded_elements_desc.json') as file:
embeddings = json.load(file)
app_name = self.device.output_dir.split('/')[-1]
if app_name not in embeddings.keys():
return None, None, None
app_embeddings = embeddings[app_name]
# similarities = {}
max_similarity, similar_ele_idx = -9999, -9999
for state_str, elements in app_embeddings.items():
# if the target element is in the first ui, no onclick is needed
# if ele_statements[app_name][state_str]['path'] == []:
# continue
# similarities[state_str] = []
for idx, ele in enumerate(elements):
if ele:
npele = np.array(ele).reshape(1, -1)
similarity = cosine_similarity(task_embedding, npele)[0][0]
else:
similarity = -9999
# similarities[state_str].append(similarity)
if similarity > max_similarity:
max_similarity = similarity
similar_ele_idx = idx
similar_state_str = state_str
similar_ele = ele_statements[app_name][similar_state_str]['elements'][similar_ele_idx]
similar_ele_path = ele_statements[app_name][similar_state_str]['path']
similar_ele_desc = ele_functions[app_name][similar_state_str][similar_ele_idx]
del model
return similar_ele_path, similar_ele_desc, similar_ele
def _scroll_to_top(self, scroller, all_views_for_mark, old_state=None):
prefix_scroll_event = []
if old_state is None:
old_state = self.current_state
for _ in range(MAX_SCROLL_NUM): # first scroll up to the top | self.device.send_event(ScrollEvent(view=scroller, direction="UP")) | 1 | 2023-10-23 03:32:58+00:00 | 12k |
cvlab-yonsei/ACLS | calibrate/evaluation/calibrate_evaluator.py | [
{
"identifier": "DatasetEvaluator",
"path": "calibrate/evaluation/evaluator.py",
"snippet": "class DatasetEvaluator(metaclass=ABCMeta):\n \"\"\"\n Base class for a dataset evaluator\n \"\"\"\n @abstractmethod\n def reset(self):\n \"\"\"\n Preparation for a new round of evalu... | import logging
import numpy as np
import torch
import torch.nn.functional as F
from terminaltables import AsciiTable
from torch import nn
from .evaluator import DatasetEvaluator
from .metrics import ECELoss, AdaptiveECELoss, ClasswiseECELoss
from .reliability_diagram import ReliabilityDiagram
from calibrate.utils.torch_helper import to_numpy | 8,777 |
logger = logging.getLogger(__name__)
class CalibrateEvaluator(DatasetEvaluator):
def __init__(self, num_classes, num_bins=15, device="cuda:0") -> None:
self.num_classes = num_classes
self.num_bins = num_bins
self.device = device
self.reset()
def reset(self) -> None:
self.logits = None
self.labels = None
def num_samples(self):
return (
self.labels.shape[0]
if self.labels is not None
else 0
)
def main_metric(self) -> None:
return "ece"
def update(self, logits: torch.Tensor, labels: torch.Tensor) -> None:
"""update
Args:
logits (torch.Tensor): n x num_classes
label (torch.Tensor): n x 1
"""
assert logits.shape[0] == labels.shape[0]
if self.logits is None:
self.logits = logits
self.labels = labels
else:
self.logits = torch.cat((self.logits, logits), dim=0)
self.labels = torch.cat((self.labels, labels), dim=0)
def mean_score(self, print=False, all_metric=True):
nll_criterion = nn.CrossEntropyLoss().to(self.device)
ece_criterion = ECELoss(self.num_bins).to(self.device)
aece_criterion = AdaptiveECELoss(self.num_bins).to(self.device)
cece_criterion = ClasswiseECELoss(self.num_bins).to(self.device)
nll = nll_criterion(self.logits, self.labels).item()
ece = ece_criterion(self.logits, self.labels).item()
aece = aece_criterion(self.logits, self.labels).item()
cece = cece_criterion(self.logits, self.labels).item()
metric = {"nll": nll, "ece": ece, "aece": aece, "cece": cece}
columns = ["samples", "nll", "ece", "aece", "cece"]
table_data = [columns]
table_data.append(
[
self.num_samples(),
"{:.5f}".format(nll),
"{:.5f}".format(ece),
"{:.5f}".format(aece),
"{:.5f}".format(cece),
]
)
if print:
table = AsciiTable(table_data)
logger.info("\n" + table.table)
if all_metric:
return metric, table_data
else:
return metric[self.main_metric()]
def plot_reliability_diagram(self, title=""):
diagram = ReliabilityDiagram(bins=25, style="curve")
probs = F.softmax(self.logits, dim=1)
fig_reliab, fig_hist = diagram.plot(
|
logger = logging.getLogger(__name__)
class CalibrateEvaluator(DatasetEvaluator):
    """Accumulates (logits, labels) over a dataset and reports calibration
    metrics: NLL, ECE, adaptive ECE and classwise ECE (see ``mean_score``).
    """
    def __init__(self, num_classes: int, num_bins: int = 15, device: str = "cuda:0") -> None:
        # num_bins sets the confidence-histogram resolution of the ECE criteria.
        self.num_classes = num_classes
        self.num_bins = num_bins
        self.device = device
        self.reset()
    def reset(self) -> None:
        # Clear accumulated predictions; buffers are (re)created on first update().
        self.logits = None
        self.labels = None
    def num_samples(self):
        # Number of samples accumulated so far (0 before any update()).
        return (
            self.labels.shape[0]
            if self.labels is not None
            else 0
        )
def main_metric(self) -> None:
return "ece"
def update(self, logits: torch.Tensor, labels: torch.Tensor) -> None:
"""update
Args:
logits (torch.Tensor): n x num_classes
label (torch.Tensor): n x 1
"""
assert logits.shape[0] == labels.shape[0]
if self.logits is None:
self.logits = logits
self.labels = labels
else:
self.logits = torch.cat((self.logits, logits), dim=0)
self.labels = torch.cat((self.labels, labels), dim=0)
    def mean_score(self, print=False, all_metric=True):
        """Compute calibration metrics over everything accumulated so far.

        Args:
            print: when truthy, also log the metrics as an ASCII table.
                NOTE(review): this parameter shadows the builtin ``print``;
                kept as-is because callers may pass it by keyword.
            all_metric: if True return ``(metric_dict, table_data)``,
                otherwise return only the main metric value (ECE).
        """
        nll_criterion = nn.CrossEntropyLoss().to(self.device)
        ece_criterion = ECELoss(self.num_bins).to(self.device)
        aece_criterion = AdaptiveECELoss(self.num_bins).to(self.device)
        cece_criterion = ClasswiseECELoss(self.num_bins).to(self.device)
        # Scalar metrics over the full accumulated buffers.
        nll = nll_criterion(self.logits, self.labels).item()
        ece = ece_criterion(self.logits, self.labels).item()
        aece = aece_criterion(self.logits, self.labels).item()
        cece = cece_criterion(self.logits, self.labels).item()
        metric = {"nll": nll, "ece": ece, "aece": aece, "cece": cece}
        columns = ["samples", "nll", "ece", "aece", "cece"]
        table_data = [columns]
        table_data.append(
            [
                self.num_samples(),
                "{:.5f}".format(nll),
                "{:.5f}".format(ece),
                "{:.5f}".format(aece),
                "{:.5f}".format(cece),
            ]
        )
        if print:
            table = AsciiTable(table_data)
            logger.info("\n" + table.table)
        if all_metric:
            return metric, table_data
        else:
            return metric[self.main_metric()]
def plot_reliability_diagram(self, title=""):
diagram = ReliabilityDiagram(bins=25, style="curve")
probs = F.softmax(self.logits, dim=1)
fig_reliab, fig_hist = diagram.plot( | to_numpy(probs), to_numpy(self.labels), | 5 | 2023-10-23 09:55:13+00:00 | 12k |
myshell-ai/AIlice | ailice/AIliceWeb.py | [
{
"identifier": "config",
"path": "ailice/common/AConfig.py",
"snippet": "class AConfig():\n def __init__(self):\n def Initialize(self, needOpenaiGPTKey = False):\n def Load(self, configFile: str) -> dict:\n def Store(self, configFile: str):"
},
{
"identifier": "AProcessor",
"pat... | import time
import simplejson as json
import threading
import gradio as gr
import argparse
from termcolor import colored
from ailice.common.AConfig import config
from ailice.core.AProcessor import AProcessor
from ailice.core.llm.ALLMPool import llmPool
from ailice.common.utils.ALogger import ALogger
from ailice.common.ARemoteAccessors import clientPool
from ailice.AServices import StartServices
from ailice.common.APrompts import promptsManager
from ailice.prompts.APromptChat import APromptChat
from ailice.prompts.APromptMain import APromptMain
from ailice.prompts.APromptSearchEngine import APromptSearchEngine
from ailice.prompts.APromptResearcher import APromptResearcher
from ailice.prompts.APromptCoder import APromptCoder
from ailice.prompts.APromptModuleCoder import APromptModuleCoder
from ailice.prompts.APromptModuleLoader import APromptModuleLoader
from ailice.prompts.APromptCoderProxy import APromptCoderProxy
from ailice.prompts.APromptArticleDigest import APromptArticleDigest | 8,625 |
def mainLoop(modelID: str, quantization: str, maxMemory: dict, prompt: str, temperature: float, flashAttention2: bool, contextWindowRatio: float, localExecution: bool, trace: str):
config.Initialize(needOpenaiGPTKey = ("oai:" in modelID))
config.quantization = quantization
config.maxMemory = maxMemory
config.temperature = temperature
config.flashAttention2 = flashAttention2
config.contextWindowRatio = contextWindowRatio
config.localExecution = localExecution
print(colored("The port range of the ext-modules has been changed from 2005-2016 to 59000-59200. If you are using an old version, startup failure will occur after updating the code. Please modify the port number in config.json and rebuild the docker image.", "yellow"))
StartServices()
clientPool.Init()
|
def mainLoop(modelID: str, quantization: str, maxMemory: dict, prompt: str, temperature: float, flashAttention2: bool, contextWindowRatio: float, localExecution: bool, trace: str):
config.Initialize(needOpenaiGPTKey = ("oai:" in modelID))
config.quantization = quantization
config.maxMemory = maxMemory
config.temperature = temperature
config.flashAttention2 = flashAttention2
config.contextWindowRatio = contextWindowRatio
config.localExecution = localExecution
print(colored("The port range of the ext-modules has been changed from 2005-2016 to 59000-59200. If you are using an old version, startup failure will occur after updating the code. Please modify the port number in config.json and rebuild the docker image.", "yellow"))
StartServices()
clientPool.Init()
| for promptCls in [APromptChat, APromptMain, APromptSearchEngine, APromptResearcher, APromptCoder, APromptModuleCoder, APromptModuleLoader, APromptCoderProxy, APromptArticleDigest]: | 15 | 2023-10-16 01:51:14+00:00 | 12k |
city96/ComfyUI_ExtraModels | PixArt/models/PixArtMS.py | [
{
"identifier": "auto_grad_checkpoint",
"path": "PixArt/models/utils.py",
"snippet": "def _ntuple(n):\n def parse(x):\ndef set_grad_checkpoint(model, use_fp32_attention=False, gc_step=1):\n def set_attr(module):\ndef auto_grad_checkpoint(module, *args, **kwargs):\ndef checkpoint_sequential(functio... | import torch
import torch.nn as nn
from tqdm import tqdm
from timm.models.layers import DropPath
from timm.models.vision_transformer import Mlp
from .utils import auto_grad_checkpoint, to_2tuple
from .PixArt_blocks import t2i_modulate, CaptionEmbedder, WindowAttention, MultiHeadCrossAttention, T2IFinalLayer, TimestepEmbedder, SizeEmbedder
from .PixArt import PixArt, get_2d_sincos_pos_embed
| 7,303 | x = self.proj(x)
if self.flatten:
x = x.flatten(2).transpose(1, 2) # BCHW -> BNC
x = self.norm(x)
return x
class PixArtMSBlock(nn.Module):
    """
    A PixArt block with adaptive layer norm zero (adaLN-Zero) conditioning.

    Sequence: (windowed or global) self-attention -> cross-attention over the
    text tokens ``y`` -> MLP, each branch residual. The timestep embedding ``t``
    supplies six modulation tensors (shift/scale/gate for the attention and MLP
    branches) added on top of a learned per-block ``scale_shift_table``.
    """
    def __init__(self, hidden_size, num_heads, mlp_ratio=4.0, drop_path=0., window_size=0, input_size=None, use_rel_pos=False, **block_kwargs):
        super().__init__()
        self.hidden_size = hidden_size
        self.norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        # window_size == 0 means full (global) attention over the whole grid.
        self.attn = WindowAttention(hidden_size, num_heads=num_heads, qkv_bias=True,
                                    input_size=input_size if window_size == 0 else (window_size, window_size),
                                    use_rel_pos=use_rel_pos, **block_kwargs)
        self.cross_attn = MultiHeadCrossAttention(hidden_size, num_heads, **block_kwargs)
        self.norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        # to be compatible with lower version pytorch
        approx_gelu = lambda: nn.GELU(approximate="tanh")
        self.mlp = Mlp(in_features=hidden_size, hidden_features=int(hidden_size * mlp_ratio), act_layer=approx_gelu, drop=0)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.window_size = window_size
        # Learned base for the six adaLN modulation tensors (added to t_block output).
        self.scale_shift_table = nn.Parameter(torch.randn(6, hidden_size) / hidden_size ** 0.5)
    def forward(self, x, y, t, mask=None, **kwargs):
        B, N, C = x.shape
        # Split the six modulation tensors out of the conditioned timestep embedding.
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (self.scale_shift_table[None] + t.reshape(B, 6, -1)).chunk(6, dim=1)
        x = x + self.drop_path(gate_msa * self.attn(t2i_modulate(self.norm1(x), shift_msa, scale_msa)))
        x = x + self.cross_attn(x, y, mask)
        x = x + self.drop_path(gate_mlp * self.mlp(t2i_modulate(self.norm2(x), shift_mlp, scale_mlp)))
        return x
#############################################################################
# Core PixArt Model #
#################################################################################
class PixArtMS(PixArt):
"""
Diffusion model with a Transformer backbone.
"""
    def __init__(
        self,
        input_size=32,
        patch_size=2,
        in_channels=4,
        hidden_size=1152,
        depth=28,
        num_heads=16,
        mlp_ratio=4.0,
        class_dropout_prob=0.1,
        learn_sigma=True,
        pred_sigma=True,
        drop_path: float = 0.,
        window_size=0,
        window_block_indexes=[],  # NOTE(review): mutable default arg — read-only here, but prefer None
        use_rel_pos=False,
        caption_channels=4096,
        lewei_scale=1.,
        config=None,
        **kwargs,
    ):
        """Multi-scale PixArt: extends the base PixArt model with image-size and
        aspect-ratio conditioning (csize/ar embedders) and a shared adaLN t_block.
        """
        super().__init__(
            input_size=input_size,
            patch_size=patch_size,
            in_channels=in_channels,
            hidden_size=hidden_size,
            depth=depth,
            num_heads=num_heads,
            mlp_ratio=mlp_ratio,
            class_dropout_prob=class_dropout_prob,
            learn_sigma=learn_sigma,
            pred_sigma=pred_sigma,
            drop_path=drop_path,
            window_size=window_size,
            window_block_indexes=window_block_indexes,
            use_rel_pos=use_rel_pos,
            lewei_scale=lewei_scale,
            config=config,
            **kwargs,
        )
        self.dtype = torch.get_default_dtype()
        self.h = self.w = 0  # latent grid size in patches; set per-batch in forward
        approx_gelu = lambda: nn.GELU(approximate="tanh")
        # Maps the timestep embedding to the 6 * hidden_size adaLN modulation values.
        self.t_block = nn.Sequential(
            nn.SiLU(),
            nn.Linear(hidden_size, 6 * hidden_size, bias=True)
        )
        self.x_embedder = PatchEmbed(patch_size, in_channels, hidden_size, bias=True)
        self.y_embedder = CaptionEmbedder(in_channels=caption_channels, hidden_size=hidden_size, uncond_prob=class_dropout_prob, act_layer=approx_gelu)
        self.csize_embedder = SizeEmbedder(hidden_size//3)  # c_size embed
        self.ar_embedder = SizeEmbedder(hidden_size//3)  # aspect ratio embed
        drop_path = [x.item() for x in torch.linspace(0, drop_path, depth)]  # stochastic depth decay rule
        self.blocks = nn.ModuleList([
            PixArtMSBlock(hidden_size, num_heads, mlp_ratio=mlp_ratio, drop_path=drop_path[i],
                          input_size=(input_size // patch_size, input_size // patch_size),
                          window_size=window_size if i in window_block_indexes else 0,
                          use_rel_pos=use_rel_pos if i in window_block_indexes else False)
            for i in range(depth)
        ])
        self.final_layer = T2IFinalLayer(hidden_size, patch_size, self.out_channels)
        # NOTE(review): shadows nn.Module's built-in `training` flag — presumably
        # intentional for inference-only use; confirm before training this class.
        self.training = False
        self.initialize()
def forward_raw(self, x, t, y, mask=None, data_info=None, **kwargs):
"""
Original forward pass of PixArt.
x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images)
t: (N,) tensor of diffusion timesteps
y: (N, 1, 120, C) tensor of class labels
"""
bs = x.shape[0]
c_size, ar = data_info['img_hw'], data_info['aspect_ratio']
self.h, self.w = x.shape[-2]//self.patch_size, x.shape[-1]//self.patch_size
| # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# --------------------------------------------------------
# References:
# GLIDE: https://github.com/openai/glide-text2im
# MAE: https://github.com/facebookresearch/mae/blob/main/models_mae.py
# --------------------------------------------------------
class PatchEmbed(nn.Module):
    """Split a BCHW image into non-overlapping patches and embed each patch.

    A strided convolution performs patchify + linear projection in one step;
    the result is optionally flattened to a (B, N, C) token sequence and then
    passed through an optional normalization layer.
    """

    def __init__(self, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True, bias=True):
        super().__init__()
        patch_size = to_2tuple(patch_size)
        self.patch_size = patch_size
        self.flatten = flatten
        # kernel == stride == patch size, so patches do not overlap.
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size, bias=bias)
        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()

    def forward(self, x):
        tokens = self.proj(x)
        if self.flatten:
            tokens = tokens.flatten(2).transpose(1, 2)  # BCHW -> BNC
        return self.norm(tokens)
class PixArtMSBlock(nn.Module):
    """PixArt transformer block with adaLN-Zero conditioning.

    Runs self-attention (global, or windowed when ``window_size > 0``), then
    cross-attention over the text tokens ``y``, then an MLP — each branch
    residual. The timestep embedding ``t`` provides six modulation tensors
    layered on top of a learned per-block ``scale_shift_table``.
    """

    def __init__(self, hidden_size, num_heads, mlp_ratio=4.0, drop_path=0., window_size=0, input_size=None, use_rel_pos=False, **block_kwargs):
        super().__init__()
        self.hidden_size = hidden_size
        self.norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        attn_input_size = input_size if window_size == 0 else (window_size, window_size)
        self.attn = WindowAttention(hidden_size, num_heads=num_heads, qkv_bias=True,
                                    input_size=attn_input_size,
                                    use_rel_pos=use_rel_pos, **block_kwargs)
        self.cross_attn = MultiHeadCrossAttention(hidden_size, num_heads, **block_kwargs)
        self.norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        # Factory form keeps compatibility with older pytorch Mlp signatures.
        approx_gelu = lambda: nn.GELU(approximate="tanh")
        self.mlp = Mlp(in_features=hidden_size, hidden_features=int(hidden_size * mlp_ratio), act_layer=approx_gelu, drop=0)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.window_size = window_size
        self.scale_shift_table = nn.Parameter(torch.randn(6, hidden_size) / hidden_size ** 0.5)

    def forward(self, x, y, t, mask=None, **kwargs):
        batch = x.shape[0]
        # Six adaLN modulation tensors: learned base + per-sample timestep part.
        mods = (self.scale_shift_table[None] + t.reshape(batch, 6, -1)).chunk(6, dim=1)
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = mods
        attn_out = self.attn(t2i_modulate(self.norm1(x), shift_msa, scale_msa))
        x = x + self.drop_path(gate_msa * attn_out)
        x = x + self.cross_attn(x, y, mask)
        mlp_out = self.mlp(t2i_modulate(self.norm2(x), shift_mlp, scale_mlp))
        x = x + self.drop_path(gate_mlp * mlp_out)
        return x
#############################################################################
# Core PixArt Model #
#################################################################################
class PixArtMS(PixArt):
"""
Diffusion model with a Transformer backbone.
"""
def __init__(
self,
input_size=32,
patch_size=2,
in_channels=4,
hidden_size=1152,
depth=28,
num_heads=16,
mlp_ratio=4.0,
class_dropout_prob=0.1,
learn_sigma=True,
pred_sigma=True,
drop_path: float = 0.,
window_size=0,
window_block_indexes=[],
use_rel_pos=False,
caption_channels=4096,
lewei_scale=1.,
config=None,
**kwargs,
):
super().__init__(
input_size=input_size,
patch_size=patch_size,
in_channels=in_channels,
hidden_size=hidden_size,
depth=depth,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
class_dropout_prob=class_dropout_prob,
learn_sigma=learn_sigma,
pred_sigma=pred_sigma,
drop_path=drop_path,
window_size=window_size,
window_block_indexes=window_block_indexes,
use_rel_pos=use_rel_pos,
lewei_scale=lewei_scale,
config=config,
**kwargs,
)
self.dtype = torch.get_default_dtype()
self.h = self.w = 0
approx_gelu = lambda: nn.GELU(approximate="tanh")
self.t_block = nn.Sequential(
nn.SiLU(),
nn.Linear(hidden_size, 6 * hidden_size, bias=True)
)
self.x_embedder = PatchEmbed(patch_size, in_channels, hidden_size, bias=True)
self.y_embedder = CaptionEmbedder(in_channels=caption_channels, hidden_size=hidden_size, uncond_prob=class_dropout_prob, act_layer=approx_gelu)
self.csize_embedder = SizeEmbedder(hidden_size//3) # c_size embed
self.ar_embedder = SizeEmbedder(hidden_size//3) # aspect ratio embed
drop_path = [x.item() for x in torch.linspace(0, drop_path, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
PixArtMSBlock(hidden_size, num_heads, mlp_ratio=mlp_ratio, drop_path=drop_path[i],
input_size=(input_size // patch_size, input_size // patch_size),
window_size=window_size if i in window_block_indexes else 0,
use_rel_pos=use_rel_pos if i in window_block_indexes else False)
for i in range(depth)
])
self.final_layer = T2IFinalLayer(hidden_size, patch_size, self.out_channels)
self.training = False
self.initialize()
def forward_raw(self, x, t, y, mask=None, data_info=None, **kwargs):
"""
Original forward pass of PixArt.
x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images)
t: (N,) tensor of diffusion timesteps
y: (N, 1, 120, C) tensor of class labels
"""
bs = x.shape[0]
c_size, ar = data_info['img_hw'], data_info['aspect_ratio']
self.h, self.w = x.shape[-2]//self.patch_size, x.shape[-1]//self.patch_size
| pos_embed = torch.from_numpy(get_2d_sincos_pos_embed(self.pos_embed.shape[-1], (self.h, self.w), lewei_scale=self.lewei_scale, base_size=self.base_size)).unsqueeze(0).to(x.device).to(self.dtype)
| 9 | 2023-10-20 21:19:44+00:00 | 12k |
apple/ml-nvas3d | demo/generate_demo_video.py | [
{
"identifier": "convolve_moving_receiver",
"path": "nvas3d/utils/dynamic_utils.py",
"snippet": "def convolve_moving_receiver(\n source_audio: np.ndarray,\n rirs: np.ndarray,\n interp_index: T.List[int],\n interp_weight: T.List[float]\n) -> np.ndarray:\n \"\"\"\n Apply convolution betw... | import os
import json
import argparse
import itertools
import subprocess
import typing as T
import torch
import imageio
import torchaudio
import numpy as np
import matplotlib.pyplot as plt
from moviepy.editor import *
from nvas3d.utils.dynamic_utils import convolve_moving_receiver, setup_dynamic_interp
from nvas3d.utils.audio_utils import clip_two, clip_all
from soundspaces_nvas3d.utils.ss_utils import create_scene, render_rir_parallel
from soundspaces_nvas3d.utils.aihabitat_utils import load_room_grid
from soundspaces_nvas3d.soundspaces_nvas3d import Receiver, Source, Scene | 8,312 | #
# For licensing see accompanying LICENSE file.
# Copyright (C) 2023 Apple Inc. All Rights Reserved.
#
def normalize(input: torch.Tensor) -> torch.Tensor:
    """Linearly rescale *input* so its values span [-1, 1]."""
    lo, hi = input.min(), input.max()
    scaled = (input - lo) / (hi - lo)
    return 2 * scaled - 1
def configure_scene_from_metadata(
metadata: T.Dict[str, T.Any],
image_size: T.Tuple[int, int] = (1000, 1000),
hfov: float = 90.0,
use_placeholder_mesh: bool = False
| #
# For licensing see accompanying LICENSE file.
# Copyright (C) 2023 Apple Inc. All Rights Reserved.
#
def normalize(input: torch.Tensor) -> torch.Tensor:
    """Linearly rescale *input* so its values span [-1, 1].

    Fix: a constant tensor (max == min) previously divided by zero and
    produced NaNs; such inputs now map to all zeros (the midpoint).
    """
    lo = input.min()
    span = input.max() - lo
    if span == 0:
        return torch.zeros_like(input)
    output = (input - lo) / span
    output = 2 * output - 1
    return output
def configure_scene_from_metadata(
metadata: T.Dict[str, T.Any],
image_size: T.Tuple[int, int] = (1000, 1000),
hfov: float = 90.0,
use_placeholder_mesh: bool = False | ) -> Scene: | 9 | 2023-10-19 05:35:54+00:00 | 12k |
tiejundong/FlexPose | FlexPose/utils/prediction.py | [
{
"identifier": "FlexPose",
"path": "FlexPose/model/layers.py",
"snippet": "class FlexPose(torch.nn.Module):\n def __init__(self, args=None, param_path=None):\n super(FlexPose, self).__init__()\n if args is not None:\n self.init_param(args)\n else:\n self.in... | import os
import shutil
import sys
import argparse
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
import pyrosetta
import pyrosetta
from biopandas.pdb import PandasPdb
from collections import defaultdict
from ray.util.multiprocessing import Pool
from rdkit import Chem
from rdkit.Chem import AllChem, Descriptors
from einops import rearrange, repeat
from torch_scatter import scatter_min, scatter_add
from FlexPose.model.layers import FlexPose
from FlexPose.utils.common import *
from FlexPose.preprocess.prepare_for_training import try_prepare_task
from FlexPose.utils.APOPDBbind_data import pred_ens
from FlexPose.utils.pdbbind_preprocess import *
from FlexPose.utils.data_utils import *
from FlexPose.model.MMFF import MMFF_keys, MMFF_pad_dim, get_MMFF_param
from tqdm.notebook import tqdm, trange
from tqdm import tqdm, trange
from modeller import Environ
from modeller.scripts import complete_pdb | 8,748 | opts = '-mute true -ignore_unrecognized_res true'
pyrosetta.distributed.init(opts)
pose = pyrosetta.io.pose_from_pdb(fixed_protein_path)
dic_tor = get_torsion_from_pose(pose)
ref_mol = read_rdkit_mol(ref_path, silence=True)
ref_coor = get_true_posi(ref_mol)
biodf_protein = PandasPdb().read_pdb(fixed_protein_path)
df_protein = biodf_protein.df['ATOM']
df_protein['chain_resi'] = df_protein['chain_id'].astype(str) + '_' + df_protein['residue_number'].astype(str)
df_pocket, sele_res = get_pocket(df_protein, ref_coor, max_len_protein=max_len_pocket)
SCtorsion_data = get_torsion(dic_tor, df_protein, df_pocket)
protein_data = encode_pocket(df_pocket) + [SCtorsion_data]
assert protein_data[0].shape[0] == SCtorsion_data[0].shape[0]
assert protein_data[0].shape[0] <= max_len_pocket, 'pocket residues need less than 150'
# os.remove(fixed_protein_path)
dic_data = dict(
ligand_data=ligand_data,
protein_data=protein_data,
protein_path=fixed_protein_path,
ligand_path=l_path,
sele_res=sele_res,
dic_MMFF_param=dic_MMFF_param,
)
pickle.dump(dic_data, open(cache_path + '/{}.pkl'.format(idx), 'wb'))
return True
def preprare_input_data(input_list, cache_path, prepare_data_with_multi_cpu):
    """Preprocess every (protein, ligand, ref) triple into a pickle under cache_path.

    Note: the function name keeps its historical spelling ('preprare') for
    API compatibility with existing callers.
    """
    delmkdir(cache_path)  # presumably deletes + recreates the cache dir — see delmkdir
    tasks = []
    for idx, f_name_list in enumerate(input_list):
        tasks.append((prepare_single_input, (f_name_list, idx, cache_path)))
    fail = 0
    if prepare_data_with_multi_cpu:
        # Ray-backed multiprocessing pool (see ray.util.multiprocessing import).
        pool = Pool()
        print('Preparing input data...')
        for r in pool.map(try_prepare_task, tasks):
            if not r:
                fail += 1
    else:
        for task in tqdm(tasks, desc='Preparing input data'):
            r = try_prepare_task(task)
            if not r:
                fail += 1
    print(f'Prepared data: {len(tasks) - fail}/{len(tasks)}, {(len(tasks) - fail) / len(tasks) * 100:.2f}%')
def read_input(protein, ligand, ref_pocket_center, batch_csv):
    """Assemble the list of (protein, ligand, ref_pocket_center) triples.

    ``batch_csv`` (columns: protein, ligand, ref_pocket_center) takes
    precedence; otherwise the three arguments are used directly, with single
    paths promoted to one-element lists.
    """
    if batch_csv is not None:
        df_input = pd.read_csv(batch_csv)
        protein_list = df_input['protein'].values
        ligand_list = df_input['ligand'].values
        ref_pocket_center_list = df_input['ref_pocket_center'].values
    else:
        assert protein is not None and ligand is not None and ref_pocket_center is not None
        as_list = lambda v: v if isinstance(v, list) else [v]
        protein_list = as_list(protein)
        ligand_list = as_list(ligand)
        ref_pocket_center_list = as_list(ref_pocket_center)
    return list(zip(protein_list, ligand_list, ref_pocket_center_list))
class InferDataset(torch.utils.data.Dataset):
    """Serves preprocessed complexes from the pickle cache written by
    ``preprare_input_data``."""
    def __init__(self, args, cache_path, ens=1):
        # ens > 1 makes __getitem__ return `ens` stochastic samples per complex.
        self.data_path = cache_path
        self.data_list = [i.split('.')[0] for i in os.listdir(cache_path) if i.endswith('.pkl')]
        self.ens = ens
        self.coor_scale = args.coor_scale
        self.max_len_pocket = args.max_len_pocket
        self.max_len_ligand = args.max_len_ligand
        # Initial ligand-coordinate noise, expressed in scaled coordinates.
        self.l_init_sigma = args.l_init_sigma / self.coor_scale
    def __getitem__(self, i):
        # With ensembling, build `ens` graphs for the same complex and collate
        # them here; the DataLoader is then expected to use a pass-through collate.
        if self.ens > 1:
            complex_graph = []
            for e in range(self.ens):
                complex_graph.append(self.get_complex(self.data_list[i]))
            complex_graph = collate_input(complex_graph)  # use collate_dummy in loader
        else:
            complex_graph = self.get_complex(self.data_list[i])
        return complex_graph
def get_complex(self, idx):
# =============== get dict data ===============
dic_data = pickle.load(open(f'{self.data_path}/{idx}.pkl', 'rb'))
ligand_data = dic_data['ligand_data']
protein_data = dic_data['protein_data']
protein_path = dic_data['protein_path']
ligand_path = dic_data['ligand_path']
sele_res = dic_data['sele_res']
dic_MMFF_param = dic_data['dic_MMFF_param']
# =============== ligand ===============
l_x_sca_init, l_edge_sca_init, l_coor_ref, l_match, l_dismap = ligand_data
l_match = l_match.reshape(-1)
n_match = len(l_match) // len(l_x_sca_init)
l_nomatch = repeat(torch.arange(0, len(l_x_sca_init)), 'm -> (n m)', n=n_match)
# get ligand MMFF (if exists)
if dic_MMFF_param is not None:
| sys.path.append('/'.join(os.path.abspath(__file__).split('/')[:-2]))
opts = '-mute true -ignore_unrecognized_res true'
pyrosetta.distributed.init(opts)
if is_notebook():
else:
def set_device(device, cpu_threads: int = 16):
    """Select the compute device for inference.

    Args:
        device: 'cpu', or a CUDA device id/name accepted by
            ``torch.cuda.set_device``.
        cpu_threads: intra-op thread count used when device == 'cpu'.
            Generalized from the previously hard-coded 16 (still the default,
            so existing callers are unaffected).
    """
    if device == 'cpu':
        torch.set_num_threads(cpu_threads)
    else:
        torch.cuda.set_device(device)
def get_torsion_from_pose(pose):
    """Read backbone (phi, psi) and side-chain (chi) torsions from a pose.

    Returns:
        dict: ``{'bb_torsion': [[phi, psi], ...], 'sc_torsion': [[chi1, ...], ...]}``
        with one entry per residue. Non-canonical residues (or residues that
        fail extraction) contribute ``[None]`` placeholders so list indices
        stay aligned with residue positions.
    """
    canonical_aa = {'ALA', 'ARG', 'ASN', 'ASP', 'CYS', 'GLN', 'GLU', 'GLY', 'HIS', 'ILE',
                    'LEU', 'LYS', 'MET', 'PHE', 'PRO', 'SER', 'THR', 'TRP', 'TYR', 'VAL'}
    bb_torsion = []
    sc_torsion = []
    for i in range(1, pose.size() + 1):  # pose residues are 1-indexed
        try:
            res = pose.residue(i)
            # Fix: was `assert ... in [...]`, which is stripped under `python -O`
            # and would silently skip the placeholder branch; raise explicitly.
            if res.name3() not in canonical_aa:
                raise ValueError(f'non-canonical residue {res.name3()}')
            phi_psi = [pose.phi(i), pose.psi(i)]
            chi = [c for c in res.chi()]
            bb_torsion.append(phi_psi)
            sc_torsion.append(chi)
        except Exception:  # fix: bare `except:` also swallowed KeyboardInterrupt/SystemExit
            bb_torsion.append([None])
            sc_torsion.append([None])
    return {'bb_torsion': bb_torsion, 'sc_torsion': sc_torsion}
def prepare_single_input(tupin):
    """Encode one (protein, ligand, reference-ligand) triple and cache it.

    Args:
        tupin: ``((p_path, l_path, ref_path), idx, cache_path)`` — protein PDB,
            ligand file, pocket-defining reference ligand, sample index and
            output directory.

    Returns:
        bool: True on success (failures propagate to the task wrapper).

    Side effects: writes ``{idx}_protein_tmp.pdb`` and ``{idx}.pkl`` under
    ``cache_path``.
    """
    f_name_list, idx, cache_path = tupin
    p_path, l_path, ref_path = f_name_list
    max_len_ligand = 150
    max_len_pocket = 150
    # =========== ligand encoding ===========
    ligand_mol = read_rdkit_mol(l_path)
    if l_path.endswith('mol2'):
        ligand_template = ligand_mol
    else:
        # Prefer a sibling .mol2 (carries bond orders) when one exists.
        mol2 = '.'.join(l_path.split('.')[:-1]) + '.mol2'
        if os.path.exists(mol2):
            try:
                ligand_template = Chem.MolFromMol2File(mol2)
                ligand_mol = AllChem.AssignBondOrdersFromTemplate(ligand_template, ligand_mol)
                print(f'Found mol2 {mol2} as input.')
            except Exception:  # fix: was bare `except:` (swallowed KeyboardInterrupt too)
                ligand_template = ligand_mol
        else:
            ligand_template = ligand_mol
    if ligand_mol.GetNumConformers() == 0:
        # No 3D coordinates supplied: embed and lightly relax with MMFF.
        AllChem.EmbedMolecule(ligand_mol, maxAttempts=10, useRandomCoords=True, clearConfs=False)
        ff = Chem.rdForceFieldHelpers.MMFFGetMoleculeForceField(
            ligand_mol, Chem.rdForceFieldHelpers.MMFFGetMoleculeProperties(ligand_mol))
        for atom_i in range(ligand_mol.GetNumAtoms()):
            ff.MMFFAddPositionConstraint(atom_i, 1, 100)  # maxDispl: maximum displacement
        ff.Minimize(maxIts=20)
    try:
        dic_MMFF_param = get_MMFF_param(ligand_template)
    except Exception:  # fix: was bare `except:`; MMFF parameters are optional
        dic_MMFF_param = None
    ligand_node_features = get_node_feature(ligand_template, 'ligand')
    ligand_edge, ligand_edge_features = get_ligand_edge_feature(ligand_template)
    ligand_match = get_ligand_match(ligand_template)
    ligand_dismap = get_ligand_unrotable_distance(ligand_template)  # not use in our model
    ligand_coor_true = get_true_posi(ligand_mol)
    ligand_coor_true = ligand_coor_true[get_ligand_match(ligand_mol, ligand_template)[0]]
    ligand_data = [ligand_node_features, ligand_edge_features, ligand_coor_true, ligand_match, ligand_dismap]
    assert len(ligand_node_features) <= max_len_ligand, 'ligand atoms need less than 150'
    # =========== protein encoding ===========
    # load modeller again for ray
    with suppress_stdout_stderr():
        env_ = Environ()
        env_.libs.topology.read(file='$(LIB)/top_heav.lib')
        env_.libs.parameters.read(file='$(LIB)/par.lib')
        fixed_protein_path = cache_path + f'/{idx}_protein_tmp.pdb'
        pdb_m = complete_pdb(env_, p_path)
        pdb_m.write(fixed_protein_path)
    opts = '-mute true -ignore_unrecognized_res true'
    pyrosetta.distributed.init(opts)
    pose = pyrosetta.io.pose_from_pdb(fixed_protein_path)
    dic_tor = get_torsion_from_pose(pose)
    ref_mol = read_rdkit_mol(ref_path, silence=True)
    ref_coor = get_true_posi(ref_mol)
    biodf_protein = PandasPdb().read_pdb(fixed_protein_path)
    df_protein = biodf_protein.df['ATOM']
    df_protein['chain_resi'] = df_protein['chain_id'].astype(str) + '_' + df_protein['residue_number'].astype(str)
    df_pocket, sele_res = get_pocket(df_protein, ref_coor, max_len_protein=max_len_pocket)
    SCtorsion_data = get_torsion(dic_tor, df_protein, df_pocket)
    protein_data = encode_pocket(df_pocket) + [SCtorsion_data]
    assert protein_data[0].shape[0] == SCtorsion_data[0].shape[0]
    assert protein_data[0].shape[0] <= max_len_pocket, 'pocket residues need less than 150'
    # os.remove(fixed_protein_path)
    dic_data = dict(
        ligand_data=ligand_data,
        protein_data=protein_data,
        protein_path=fixed_protein_path,
        ligand_path=l_path,
        sele_res=sele_res,
        dic_MMFF_param=dic_MMFF_param,
    )
    # Fix: close the output file handle (was an unclosed open() inside pickle.dump).
    with open(cache_path + '/{}.pkl'.format(idx), 'wb') as f:
        pickle.dump(dic_data, f)
    return True
def preprare_input_data(input_list, cache_path, prepare_data_with_multi_cpu):
    """Preprocess every (protein, ligand, ref) triple into a pickle under cache_path.

    Name keeps its historical spelling ('preprare') for API compatibility.
    """
    delmkdir(cache_path)
    tasks = [(prepare_single_input, (f_name_list, idx, cache_path))
             for idx, f_name_list in enumerate(input_list)]
    fail = 0
    if prepare_data_with_multi_cpu:
        pool = Pool()
        print('Preparing input data...')
        fail = sum(1 for r in pool.map(try_prepare_task, tasks) if not r)
    else:
        for task in tqdm(tasks, desc='Preparing input data'):
            if not try_prepare_task(task):
                fail += 1
    n_ok = len(tasks) - fail
    print(f'Prepared data: {n_ok}/{len(tasks)}, {n_ok / len(tasks) * 100:.2f}%')
def read_input(protein, ligand, ref_pocket_center, batch_csv):
    """Collect (protein, ligand, ref_pocket_center) triples for inference.

    Args:
        protein / ligand / ref_pocket_center: a single path or a list of paths.
            Ignored when ``batch_csv`` is given.
        batch_csv: optional CSV with columns ``protein``, ``ligand`` and
            ``ref_pocket_center``; takes precedence over the other arguments.

    Returns:
        list[tuple]: one ``(protein, ligand, ref_pocket_center)`` per input.

    Raises:
        ValueError: if the three input lists differ in length. (Fix: the old
            implementation silently truncated to the shortest list via zip().)
    """
    if batch_csv is not None:
        df_input = pd.read_csv(batch_csv)
        protein_list = df_input['protein'].values
        ligand_list = df_input['ligand'].values
        ref_pocket_center_list = df_input['ref_pocket_center'].values
    else:
        assert protein is not None and ligand is not None and ref_pocket_center is not None
        protein_list = protein if isinstance(protein, list) else [protein]
        ligand_list = ligand if isinstance(ligand, list) else [ligand]
        ref_pocket_center_list = ref_pocket_center if isinstance(ref_pocket_center, list) else [ref_pocket_center]
    if not (len(protein_list) == len(ligand_list) == len(ref_pocket_center_list)):
        raise ValueError('protein, ligand and ref_pocket_center must have the same length')
    input_list = [(i, j, k) for i, j, k in zip(protein_list, ligand_list, ref_pocket_center_list)]
    return input_list
class InferDataset(torch.utils.data.Dataset):
    """Serves preprocessed complexes from the pickle cache written by
    ``preprare_input_data``."""
    def __init__(self, args, cache_path, ens=1):
        # ens > 1 makes __getitem__ return `ens` stochastic samples per complex.
        self.data_path = cache_path
        self.data_list = [i.split('.')[0] for i in os.listdir(cache_path) if i.endswith('.pkl')]
        self.ens = ens
        self.coor_scale = args.coor_scale
        self.max_len_pocket = args.max_len_pocket
        self.max_len_ligand = args.max_len_ligand
        # Initial ligand-coordinate noise, expressed in scaled coordinates.
        self.l_init_sigma = args.l_init_sigma / self.coor_scale
    def __getitem__(self, i):
        # With ensembling, build `ens` graphs for the same complex and collate
        # them here; the DataLoader is then expected to use a pass-through collate.
        if self.ens > 1:
            complex_graph = []
            for e in range(self.ens):
                complex_graph.append(self.get_complex(self.data_list[i]))
            complex_graph = collate_input(complex_graph)  # use collate_dummy in loader
        else:
            complex_graph = self.get_complex(self.data_list[i])
        return complex_graph
def get_complex(self, idx):
# =============== get dict data ===============
dic_data = pickle.load(open(f'{self.data_path}/{idx}.pkl', 'rb'))
ligand_data = dic_data['ligand_data']
protein_data = dic_data['protein_data']
protein_path = dic_data['protein_path']
ligand_path = dic_data['ligand_path']
sele_res = dic_data['sele_res']
dic_MMFF_param = dic_data['dic_MMFF_param']
# =============== ligand ===============
l_x_sca_init, l_edge_sca_init, l_coor_ref, l_match, l_dismap = ligand_data
l_match = l_match.reshape(-1)
n_match = len(l_match) // len(l_x_sca_init)
l_nomatch = repeat(torch.arange(0, len(l_x_sca_init)), 'm -> (n m)', n=n_match)
# get ligand MMFF (if exists)
if dic_MMFF_param is not None: | dic_MMFF_param = self.repad_MMFFparam(dic_MMFF_param, MMFF_keys, MMFF_pad_dim) | 3 | 2023-10-19 22:03:51+00:00 | 12k |
openvpi/SingingVocoders | training/univnet.py | [
{
"identifier": "UnivNet",
"path": "models/univnet/univnet.py",
"snippet": "class UnivNet(torch.nn.Module):\n \"\"\"Parallel WaveGAN Generator module.\"\"\"\n\n def __init__(self, h, use_weight_norm=True):\n\n super().__init__()\n\n in_channels = h['model_args']['cond_in_channels']\n... | import logging
import os
import pathlib
import random
import sys
import lightning.pytorch as pl
import matplotlib
import numpy as np
import torch.utils.data
import utils
from typing import Dict
from lightning.pytorch.utilities.rank_zero import rank_zero_debug, rank_zero_info, rank_zero_only
from matplotlib import pyplot as plt
from torch import nn
from torch.utils.data import Dataset
from torchmetrics import Metric, MeanMetric
from models.univnet.univnet import UnivNet
from modules.loss.univloss import univloss
from modules.univ_D.discriminator import MultiPeriodDiscriminator, MultiResSpecDiscriminator
from training.base_task_gan import GanBaseTask
from utils.wav2mel import PitchAdjustableMelSpectrogram | 10,709 | cty=(len(record['spectrogram'].T) * samples_per_frame)
record['audio'] = record['audio'][:cty]
record['audio'] = np.pad(record['audio'], (
0, (len(record['spectrogram'].T) * samples_per_frame) - len(record['audio'])),
mode='constant')
pass
else:
# record['spectrogram'] = record['spectrogram'][start:end].T
record['audio'] = record['audio'][start:end]
record['audio'] = np.pad(record['audio'], (0, (end - start) - len(record['audio'])),
mode='constant')
if self.volume_aug:
for record in minibatch:
if random.random() < self.volume_aug_prob:
audio = record['audio']
audio_mel = record['spectrogram']
max_amp = float(np.max(np.abs(audio))) + 1e-5
max_shift = min(3, np.log(1 / max_amp))
log_mel_shift = random.uniform(-3, max_shift)
# audio *= (10 ** log_mel_shift)
audio *= np.exp(log_mel_shift)
audio_mel += log_mel_shift
audio_mel = torch.clamp(torch.from_numpy(audio_mel), min=np.log(1e-5)).numpy()
record['audio'] = audio
record['spectrogram'] = audio_mel
audio = np.stack([record['audio'] for record in minibatch if 'audio' in record])
spectrogram = np.stack([record['spectrogram'] for record in minibatch if 'spectrogram' in record])
f0 = np.stack([record['f0'] for record in minibatch if 'f0' in record])
return {
'audio': torch.from_numpy(audio).unsqueeze(1),
'mel': torch.from_numpy(spectrogram), 'f0': torch.from_numpy(f0),
}
class stftlog:
    """STFT-magnitude helper used for spectrogram logging.

    Caches one Hann window per device so repeated calls do not re-allocate.
    (Class name kept lowercase for compatibility with existing callers.)
    """

    def __init__(self,
                 n_fft=2048,
                 win_length=2048,
                 hop_length=512,
                 center=False,):
        self.hop_length = hop_length
        self.n_fft = n_fft
        self.win_size = win_length  # fix: was redundantly assigned twice
        self.center = center
        self.hann_window = {}  # device string -> cached window tensor

    def exc(self, y):
        """Return |STFT(y)| for a batch of waveforms ``y`` of shape (B, T).

        Output shape: (B, n_fft // 2 + 1, n_frames).
        """
        hann_window_key = f"{y.device}"
        if hann_window_key not in self.hann_window:
            self.hann_window[hann_window_key] = torch.hann_window(
                self.win_size, device=y.device
            )
        # Reflect-pad so frame centers line up with hop boundaries (center=False).
        y = torch.nn.functional.pad(
            y.unsqueeze(1),
            (
                int((self.win_size - self.hop_length) // 2),
                int((self.win_size - self.hop_length + 1) // 2),
            ),
            mode="reflect",
        )
        y = y.squeeze(1)
        spec = torch.stft(
            y,
            self.n_fft,
            hop_length=self.hop_length,
            win_length=self.win_size,
            window=self.hann_window[hann_window_key],
            center=self.center,
            pad_mode="reflect",
            normalized=False,
            onesided=True,
            return_complex=True,
        ).abs()
        return spec
class univnet_task(GanBaseTask):
def __init__(self, config):
super().__init__(config)
self.TF = PitchAdjustableMelSpectrogram( f_min=0,
f_max=None,
n_mels=256,)
self.logged_gt_wav = set()
self.stft=stftlog()
upmel = config['model_args'].get('upmel')
self.upmel=upmel
# if upmel is not None:
# self.noisec=config['model_args']['cond_in_channels']*upmel
# else:
self.noisec = config['model_args']['cond_in_channels']
def build_dataset(self):
self.train_dataset = nsf_HiFigan_dataset(config=self.config,
data_dir=pathlib.Path(self.config['DataIndexPath']) / self.config[
'train_set_name'])
self.valid_dataset = nsf_HiFigan_dataset(config=self.config,
data_dir=pathlib.Path(self.config['DataIndexPath']) / self.config[
'valid_set_name'], infer=True)
def build_model(self):
# cfg=self.config['model_args']
# cfg.update({'sampling_rate':self.config['audio_sample_rate'],'num_mels':self.config['audio_num_mel_bins'],'hop_size':self.config['hop_size']})
# h=AttrDict(cfg)
self.generator=UnivNet(self.config,use_weight_norm=self.config['model_args'].get('use_weight_norm',True))
self.discriminator=nn.ModuleDict({'mrd':MultiResSpecDiscriminator(fft_sizes=self.config['model_args'].get('mrd_fft_sizes',[1024, 2048, 512]),
hop_sizes=self.config['model_args'].get('mrd_hop_sizes',[120, 240, 50]),
win_lengths= self.config['model_args'].get('mrd_win_lengths',[600, 1200, 240]),), 'mpd':MultiPeriodDiscriminator(periods=self.config['model_args']['discriminator_periods'])})
def build_losses_and_metrics(self):
|
# from models.lvc_ddspgan.lvc_ddspgan import DDSPgan
# from models.nsf_HiFigan.models import Generator, AttrDict, MultiScaleDiscriminator, MultiPeriodDiscriminator
def spec_to_figure(spec, vmin=None, vmax=None):
if isinstance(spec, torch.Tensor):
spec = spec.cpu().numpy()
fig = plt.figure(figsize=(12, 9),dpi=100)
plt.pcolor(spec.T, vmin=vmin, vmax=vmax)
plt.tight_layout()
return fig
class nsf_HiFigan_dataset(Dataset):
def __init__(self, config: dict, data_dir, infer=False):
super().__init__()
self.config = config
self.data_dir = data_dir if isinstance(data_dir, pathlib.Path) else pathlib.Path(data_dir)
with open(self.data_dir, 'r', encoding='utf8') as f:
fills = f.read().strip().split('\n')
self.data_index = fills
self.infer = infer
self.volume_aug = self.config['volume_aug']
self.volume_aug_prob = self.config['volume_aug_prob'] if not infer else 0
def __getitem__(self, index):
data_path = self.data_index[index]
data = np.load(data_path)
return {'f0':data['f0'],'spectrogram':data['mel'],'audio':data['audio']}
def __len__(self):
return len(self.data_index)
def collater(self, minibatch):
samples_per_frame = self.config['hop_size']
if self.infer:
crop_mel_frames = 0
else:
crop_mel_frames = self.config['crop_mel_frames']
for record in minibatch:
# Filter out records that aren't long enough.
if len(record['spectrogram']) < crop_mel_frames:
del record['spectrogram']
del record['audio']
del record['f0']
continue
start = random.randint(0, record['spectrogram'].shape[0] - 1 - crop_mel_frames)
end = start + crop_mel_frames
if self.infer:
record['spectrogram'] = record['spectrogram'].T
record['f0'] = record['f0']
else:
record['spectrogram'] = record['spectrogram'][start:end].T
record['f0'] = record['f0'][start:end]
start *= samples_per_frame
end *= samples_per_frame
if self.infer:
cty=(len(record['spectrogram'].T) * samples_per_frame)
record['audio'] = record['audio'][:cty]
record['audio'] = np.pad(record['audio'], (
0, (len(record['spectrogram'].T) * samples_per_frame) - len(record['audio'])),
mode='constant')
pass
else:
# record['spectrogram'] = record['spectrogram'][start:end].T
record['audio'] = record['audio'][start:end]
record['audio'] = np.pad(record['audio'], (0, (end - start) - len(record['audio'])),
mode='constant')
if self.volume_aug:
for record in minibatch:
if random.random() < self.volume_aug_prob:
audio = record['audio']
audio_mel = record['spectrogram']
max_amp = float(np.max(np.abs(audio))) + 1e-5
max_shift = min(3, np.log(1 / max_amp))
log_mel_shift = random.uniform(-3, max_shift)
# audio *= (10 ** log_mel_shift)
audio *= np.exp(log_mel_shift)
audio_mel += log_mel_shift
audio_mel = torch.clamp(torch.from_numpy(audio_mel), min=np.log(1e-5)).numpy()
record['audio'] = audio
record['spectrogram'] = audio_mel
audio = np.stack([record['audio'] for record in minibatch if 'audio' in record])
spectrogram = np.stack([record['spectrogram'] for record in minibatch if 'spectrogram' in record])
f0 = np.stack([record['f0'] for record in minibatch if 'f0' in record])
return {
'audio': torch.from_numpy(audio).unsqueeze(1),
'mel': torch.from_numpy(spectrogram), 'f0': torch.from_numpy(f0),
}
class stftlog:
def __init__(self,
n_fft=2048,
win_length=2048,
hop_length=512,
center=False,):
self.hop_length=hop_length
self.win_size=win_length
self.n_fft = n_fft
self.win_size = win_length
self.center = center
self.hann_window = {}
def exc(self,y):
hann_window_key = f"{y.device}"
if hann_window_key not in self.hann_window:
self.hann_window[hann_window_key] = torch.hann_window(
self.win_size, device=y.device
)
y = torch.nn.functional.pad(
y.unsqueeze(1),
(
int((self.win_size - self.hop_length) // 2),
int((self.win_size - self.hop_length+1) // 2),
),
mode="reflect",
)
y = y.squeeze(1)
spec = torch.stft(
y,
self.n_fft,
hop_length=self.hop_length,
win_length=self.win_size,
window=self.hann_window[hann_window_key],
center=self.center,
pad_mode="reflect",
normalized=False,
onesided=True,
return_complex=True,
).abs()
return spec
class univnet_task(GanBaseTask):
def __init__(self, config):
super().__init__(config)
self.TF = PitchAdjustableMelSpectrogram( f_min=0,
f_max=None,
n_mels=256,)
self.logged_gt_wav = set()
self.stft=stftlog()
upmel = config['model_args'].get('upmel')
self.upmel=upmel
# if upmel is not None:
# self.noisec=config['model_args']['cond_in_channels']*upmel
# else:
self.noisec = config['model_args']['cond_in_channels']
def build_dataset(self):
self.train_dataset = nsf_HiFigan_dataset(config=self.config,
data_dir=pathlib.Path(self.config['DataIndexPath']) / self.config[
'train_set_name'])
self.valid_dataset = nsf_HiFigan_dataset(config=self.config,
data_dir=pathlib.Path(self.config['DataIndexPath']) / self.config[
'valid_set_name'], infer=True)
def build_model(self):
# cfg=self.config['model_args']
# cfg.update({'sampling_rate':self.config['audio_sample_rate'],'num_mels':self.config['audio_num_mel_bins'],'hop_size':self.config['hop_size']})
# h=AttrDict(cfg)
self.generator=UnivNet(self.config,use_weight_norm=self.config['model_args'].get('use_weight_norm',True))
self.discriminator=nn.ModuleDict({'mrd':MultiResSpecDiscriminator(fft_sizes=self.config['model_args'].get('mrd_fft_sizes',[1024, 2048, 512]),
hop_sizes=self.config['model_args'].get('mrd_hop_sizes',[120, 240, 50]),
win_lengths= self.config['model_args'].get('mrd_win_lengths',[600, 1200, 240]),), 'mpd':MultiPeriodDiscriminator(periods=self.config['model_args']['discriminator_periods'])})
def build_losses_and_metrics(self): | self.mix_loss=univloss(self.config) | 1 | 2023-10-17 13:45:09+00:00 | 12k |
OllieBoyne/FOUND | FOUND/utils/eval_utils.py | [
{
"identifier": "modified_chamf",
"path": "FOUND/utils/pytorch3d.py",
"snippet": "def modified_chamf(x,y, x_lengths=None, y_lengths=None,\n x_normals=None, y_normals=None,\n norm: int = 2):\n \"\"\"\n \tA modified version of pytorch3d.loss.chamfer_distance\n \tto allow for no point or batch... | from pytorch3d.renderer import TexturesVertex
from pytorch3d.structures import Meshes
from multiprocessing import Process
from prettytable import PrettyTable
from .pytorch3d import modified_chamf, modified_sample
from .renderer import Renderer, view_from
from .vis import produce_grid, put_text, colourbar
from matplotlib import pyplot as plt
import os
import trimesh
import cv2
import multiprocessing as mp
import torch
import torch.nn.functional as F
import numpy as np
import json | 8,570 | if settings.get('model', 'FIND') == 'FIND':
# apply masking here to not include errors for sole in pred -> real
# errs has a sample of the vertices in, need to do correct indexing
sampled_vertex_mask = FIND_sole_faces_mask[pred_sample_dict['face_idxs'].cpu().detach().numpy()[0]]
errs['cham_x'][:, sampled_vertex_mask] = np.nan
errs['cham_norm_x'][:, sampled_vertex_mask] = np.nan
# vis_errs has all vertices in mesh in
vis_errs['cham_x'][:, FIND_sole_vertex_mask] = np.nan
vis_errs['cham_norm_x'][:, FIND_sole_vertex_mask] = np.nan
# visualisation info for each metric of error
vis_params = {
'cham': dict(vmin=0, vmax=1e-4, mag=1_000_000, units='um', cutoffs=np.array([5, 10, 15, 20, 25])*1e-6, xscale='log'),
'cham_norm': dict(vmin=0, vmax=60, mag=1, units='deg', cutoffs=[5, 7.5, 11.25, 22.5, 30], xscale='lin')
}
# define axes
fig, axs = plt.subplots(nrows=2, ncols=2, sharex='col')
axs[0, 0].set_title('Chamfer Error')
axs[0, 1].set_title('Normal Error')
axs[0, 0].set_ylabel('pred2real')
axs[1, 0].set_ylabel('real2pred')
axs[1, 0].set_xlabel('um')
axs[1, 1].set_xlabel('Degrees')
axs[1,1].set_xlim(0, 90)
axs[1, 1].set_yticks([0, 30, 60, 90])
with Reporter(os.path.join(eval_dir, 'report.txt')) as report:
report(f"Experiment: {exp_dir}")
i = 0
for L in ['cham', 'cham_norm']:
report(L)
table = PrettyTable()
cutoffs = vis_params[L]['cutoffs']
mag = vis_params[L]['mag']
table.field_names = ['Desc', 'Mean', 'Median', 'RMSE'] + [f'% < {round(x*mag)}' for x in cutoffs]
for desc, x in zip(['pred2real', 'real2pred'], ['x', 'y']):
e = errs[f'{L}_{x}'].cpu().detach().numpy()
e = e[~np.isnan(e)] # filter out nan values
metrics = eval_metrics(e, cutoffs=cutoffs)
table.add_row([desc] +
[f'{metrics[k] * mag:.2f}' for k in ['mean', 'median', 'rmse']] +
[f'{i * 100:.1f}' for i in metrics['cutoffs']]
)
# plot distribution of errors
ax = axs[i%2, i//2]
if vis_params[L]['xscale'] == 'log':
ax.hist(**get_loghist(np.ravel(e)*mag, 100), density=True)
ax.set_xscale('log')
else:
ax.hist(np.ravel(e) * mag, bins=100, density=True)
i+=1
results[f'{L}_{desc}'] = {**{k: metrics[k] * mag for k in ['mean', 'median', 'rmse']},
**{f'% < {round(c*mag)}': i * 100 for c, i in zip(cutoffs, metrics['cutoffs'])}}
report(table.get_string())
report("")
plt.savefig(os.path.join(eval_dir, 'err_dist.png'))
plt.close()
# Set up rendering
if render:
renderer: Renderer = Renderer(image_size=256, max_faces_per_bin=100_000, device=device)
R, T = view_from(['side1', 'topdown', 'side2'])
nviews = len(R)
vis_elements = []
# render chamf & norm err on GT mesh and pred mesh
for i, (mesh, err_key) in enumerate(zip([gt_mesh, pred_mesh, gt_mesh, pred_mesh],
['cham_y', 'cham_x', 'cham_norm_y', 'cham_norm_x'])):
vis_type = 'cham_norm' if 'norm' in err_key else 'cham'
# set texture according to error
this_error = vis_errs[err_key]
colours = err_to_colour(this_error, vmin=vis_params[vis_type]['vmin'], vmax=vis_params[vis_type]['vmax'])
mesh.textures = TexturesVertex(colours)
res = renderer(mesh, R, T, render_normals=False, render_sil=False) # render mesh
# add to plot
vis_elements.append([res['rgb'][n] for n in range(nviews)])
grid = produce_grid(vis_elements)
gridH, gridW, _ = grid.shape
left_size = gridH // 8 # size of left padding in pix
right_size = gridH // 8 # right size padding for colourbar
out = np.zeros((gridH, left_size + gridW + right_size, 3), dtype=np.uint8)
out[:, left_size:-right_size] = grid
# write row names
row_names = 'Chamf\nGT', 'Chamf\nFIND', 'Norm\nGT', 'Norm\nFIND'
for i in range(4):
out = put_text(out, row_names[i],
x=0, y=int(gridH*i/4), width=int(left_size), height=int(gridH//4), scale=left_size / 100,
vertical=True)
# add colourbars
# width, height, colours, points = (0, 1), orientation = 'vertical'
cW, cH = right_size//2, int(gridH*0.3)
cbar_x = left_size + gridW + (right_size - cW) // 2
cbar_ys = [int(0.1 * gridH), int(0.6*gridH)]
for key, y in zip(['cham', 'cham_norm'], cbar_ys):
params = vis_params[key]
| """Evaluate the performance of a fitted mesh"""
device = 'cuda'
def eval_metrics(arr, cutoffs=[5, 7.5, 11.25, 22.5, 30]):
"""Given a 1d array, return mean, median, rmse,
and % of values less than each in `cutoffs`"""
assert arr.ndim == 1, "eval_metrics requires 1D array"
out = dict(mean = arr.mean(), median = np.median(arr), rmse = (arr ** 2).mean() **.5,
cutoffs = [(arr < i).mean() for i in cutoffs])
return out
def err_to_colour(err: torch.Tensor, vmin:float=None, vmax:float=None, colmin=(0, 1, 0), colmax=(1, 0, 0), nan_colour=(0.3, 0.3, 0.3)):
"""Convert a tensor of errors (...) to an RGB colour scale (..., 3).
Linearly interpolate so that err of vmin -> colmin, err of vmax -> colmax
if vmin and vmax not given, take min and max of err
If any nan's given, set their colour to nan_colour
"""
ndim = err.ndim
colmin = torch.tensor(colmin)[(None,)*ndim].to(err.device) # expand colmin to [..., 3]
colmax = torch.tensor(colmax)[(None,)*ndim].to(err.device)
colnan = torch.tensor(nan_colour)[(None,)*ndim].to(err.device)
vmin = err.nanmin() if vmin is None else vmin
vmax = err.nanmax() if vmax is None else vmax
fracs = (err - vmin) / (vmax - vmin)
rgba = (colmin + fracs.unsqueeze(-1) * (colmax - colmin)).to(err.device)
rgba = torch.clip(rgba, min=0, max=1)
rgba[torch.any(torch.isnan(rgba), dim=-1)] = colnan
return rgba
class Reporter:
"""Receive statements, on exit print all and save all to file"""
def __init__(self, out_file_loc):
self.lines = []
self.out_file_loc = out_file_loc
def __call__(self, line):
self.lines.append(line)
def __enter__(self, *args):
return self
def __exit__(self, *args):
[*map(print, self.lines)]
with open(self.out_file_loc, 'w') as outfile:
outfile.writelines([s + '\n' for s in self.lines])
def get_max_fit(exp_dir):
"""Search in an experiment directory for the fit_xx.obj with the highest value"""
f = lambda s: -1 if 'fit_' not in s else int(s.split('fit_')[1].split('.obj')[0])
return max(os.listdir(exp_dir), key=f)
def cutoff_slice_FIND(mesh, max_heel_height = 0.04, cutoff_height = 0.1):
"""Similar mesh slicing method to FIND: identify heel keypoint, slice off 1cm above"""
X, Y, Z = mesh.vertices.T
Xma = np.ma.array(X, mask= Z >= max_heel_height)
heel_idx = np.ma.argmin(Xma)
slice_height = min(Z[heel_idx] + cutoff_height, Z.max() - 5e-3)
return mesh.slice_plane([0, 0, slice_height], [0, 0, -1], cap=False)
def get_loghist(x, nbins):
hist, bins = np.histogram(x, bins=nbins)
logbins = np.logspace(np.log10(bins[0]),np.log10(bins[-1]),len(bins))
return dict(x=x, bins=logbins)
def eval_exp(exp_dir, render=True):
results = {} # return results as errors
if not any('fit_' in f for f in os.listdir(exp_dir)):
print(f"No fits for {exp_dir}, skipping...")
return
pred_obj_loc = os.path.join(exp_dir, get_max_fit(exp_dir))
# load settings to get folder
opts_loc = os.path.join(exp_dir, 'opts.json')
if not os.path.isfile(opts_loc):
print(f"No opts for {exp_dir}, skipping...")
return
with open(opts_loc) as infile:
settings = json.load(infile)
# assume GT OBJ loc is
# (1) saved in <data_folder>/mesh.obj if <data_folder> given
if 'data_folder' in settings:
gt_obj_loc = os.path.join(settings['data_folder'], 'mesh.obj')
# (2) saved in <exp_dir>/gt_mesh.obj otherwise
else:
gt_obj_loc = os.path.join(exp_dir, 'gt_mesh.obj')
eval_dir = os.path.join(exp_dir, 'eval')
os.makedirs(eval_dir, exist_ok=True)
with open(gt_obj_loc) as infile:
d = trimesh.exchange.obj.load_obj(infile, process=False)
gt_mesh_trimesh = trimesh.Trimesh(**d)
with open(pred_obj_loc) as infile:
d = trimesh.exchange.obj.load_obj(infile, process=False)
pred_mesh_trimesh = trimesh.Trimesh(**d)
# pre-process meshes, w/ cutoff
# Same method as used for Foot3D here for slicing GT
gt_mesh_trimesh = cutoff_slice_FIND(gt_mesh_trimesh)
if settings.get('model', 'FIND') == 'FIND':
# slice FIND faces
FIND_cutoff_surface = np.load(os.path.join(settings['find_pth'], 'templ_masked_faces.npy'))
FIND_sole_faces = np.load(os.path.join(settings['find_pth'], 'templ_sole_faces.npy'))
FIND_sole_verts = np.unique(np.ravel(pred_mesh_trimesh.faces[FIND_sole_faces])) # all vertices considered part of the sole
sole_vert_positions = pred_mesh_trimesh.vertices[FIND_sole_verts] # save sole vertex positions to refind them after mesh pre-processing
pred_mesh_trimesh.update_faces(~np.isin(np.arange(pred_mesh_trimesh.faces.shape[0]), FIND_cutoff_surface))
pred_mesh_trimesh = cutoff_slice_FIND(pred_mesh_trimesh)
# define a mask
# want to be able to define a mask on the FIND model, so that errors of verts in this mask aren't considered real -> pred, but are considered in reverse
# (for sole verts, unfair to count the error on them, but likewise incorrect to just remove them all, especially at the boundary)
# recalculate sole vertices
FIND_sole_vert_idxs = np.argwhere(np.all(pred_mesh_trimesh.vertices[:, None, :] == sole_vert_positions[None, ...], axis=-1))[:, 0]
FIND_sole_vertex_mask = np.isin(np.arange(pred_mesh_trimesh.vertices.shape[0]), FIND_sole_vert_idxs) # mask of which vertices correspond to the sole
FIND_sole_faces_mask = np.any(FIND_sole_vertex_mask[pred_mesh_trimesh.faces], axis=-1) # mask of which faces are in sole
else:
pred_mesh_trimesh = cutoff_slice_FIND(pred_mesh_trimesh)
# Convert to PyTorch3D
p3d_from_trimesh = lambda mesh: Meshes(verts=torch.from_numpy(np.asarray(mesh.vertices)[None, ...]).float(),
faces=torch.from_numpy(np.asarray(mesh.faces)[None, ...])).to(device)
gt_mesh = p3d_from_trimesh(gt_mesh_trimesh)
pred_mesh = p3d_from_trimesh(pred_mesh_trimesh)
# Sample vertices uniformly from mesh, returning vertex position, normal, and original face/vert idxs
gt_sample_dict = modified_sample(gt_mesh, num_samples=10_000, return_normals=True)
pred_sample_dict = modified_sample(pred_mesh, num_samples=10_000, return_normals=True)
# Calculate errors for reporting - by considering samples over the surface
errs = modified_chamf(pred_sample_dict['verts'], gt_sample_dict['verts'],
x_normals=pred_sample_dict['normals'], y_normals=gt_sample_dict['normals'])
# Calculate errors for visualisation - by considering every vertex
vis_errs = modified_chamf(pred_mesh.verts_padded(), gt_mesh.verts_padded(),
x_normals=pred_mesh.verts_normals_padded(), y_normals=gt_mesh.verts_normals_padded())
# convert from cosine similarity to error in degrees
errs['cham_norm_x'] = torch.rad2deg(torch.acos(errs['cham_norm_x']))
errs['cham_norm_y'] = torch.rad2deg(torch.acos(errs['cham_norm_y']))
vis_errs['cham_norm_x'] = torch.rad2deg(torch.acos(vis_errs['cham_norm_x']))
vis_errs['cham_norm_y'] = torch.rad2deg(torch.acos(vis_errs['cham_norm_y']))
if settings.get('model', 'FIND') == 'FIND':
# apply masking here to not include errors for sole in pred -> real
# errs has a sample of the vertices in, need to do correct indexing
sampled_vertex_mask = FIND_sole_faces_mask[pred_sample_dict['face_idxs'].cpu().detach().numpy()[0]]
errs['cham_x'][:, sampled_vertex_mask] = np.nan
errs['cham_norm_x'][:, sampled_vertex_mask] = np.nan
# vis_errs has all vertices in mesh in
vis_errs['cham_x'][:, FIND_sole_vertex_mask] = np.nan
vis_errs['cham_norm_x'][:, FIND_sole_vertex_mask] = np.nan
# visualisation info for each metric of error
vis_params = {
'cham': dict(vmin=0, vmax=1e-4, mag=1_000_000, units='um', cutoffs=np.array([5, 10, 15, 20, 25])*1e-6, xscale='log'),
'cham_norm': dict(vmin=0, vmax=60, mag=1, units='deg', cutoffs=[5, 7.5, 11.25, 22.5, 30], xscale='lin')
}
# define axes
fig, axs = plt.subplots(nrows=2, ncols=2, sharex='col')
axs[0, 0].set_title('Chamfer Error')
axs[0, 1].set_title('Normal Error')
axs[0, 0].set_ylabel('pred2real')
axs[1, 0].set_ylabel('real2pred')
axs[1, 0].set_xlabel('um')
axs[1, 1].set_xlabel('Degrees')
axs[1,1].set_xlim(0, 90)
axs[1, 1].set_yticks([0, 30, 60, 90])
with Reporter(os.path.join(eval_dir, 'report.txt')) as report:
report(f"Experiment: {exp_dir}")
i = 0
for L in ['cham', 'cham_norm']:
report(L)
table = PrettyTable()
cutoffs = vis_params[L]['cutoffs']
mag = vis_params[L]['mag']
table.field_names = ['Desc', 'Mean', 'Median', 'RMSE'] + [f'% < {round(x*mag)}' for x in cutoffs]
for desc, x in zip(['pred2real', 'real2pred'], ['x', 'y']):
e = errs[f'{L}_{x}'].cpu().detach().numpy()
e = e[~np.isnan(e)] # filter out nan values
metrics = eval_metrics(e, cutoffs=cutoffs)
table.add_row([desc] +
[f'{metrics[k] * mag:.2f}' for k in ['mean', 'median', 'rmse']] +
[f'{i * 100:.1f}' for i in metrics['cutoffs']]
)
# plot distribution of errors
ax = axs[i%2, i//2]
if vis_params[L]['xscale'] == 'log':
ax.hist(**get_loghist(np.ravel(e)*mag, 100), density=True)
ax.set_xscale('log')
else:
ax.hist(np.ravel(e) * mag, bins=100, density=True)
i+=1
results[f'{L}_{desc}'] = {**{k: metrics[k] * mag for k in ['mean', 'median', 'rmse']},
**{f'% < {round(c*mag)}': i * 100 for c, i in zip(cutoffs, metrics['cutoffs'])}}
report(table.get_string())
report("")
plt.savefig(os.path.join(eval_dir, 'err_dist.png'))
plt.close()
# Set up rendering
if render:
renderer: Renderer = Renderer(image_size=256, max_faces_per_bin=100_000, device=device)
R, T = view_from(['side1', 'topdown', 'side2'])
nviews = len(R)
vis_elements = []
# render chamf & norm err on GT mesh and pred mesh
for i, (mesh, err_key) in enumerate(zip([gt_mesh, pred_mesh, gt_mesh, pred_mesh],
['cham_y', 'cham_x', 'cham_norm_y', 'cham_norm_x'])):
vis_type = 'cham_norm' if 'norm' in err_key else 'cham'
# set texture according to error
this_error = vis_errs[err_key]
colours = err_to_colour(this_error, vmin=vis_params[vis_type]['vmin'], vmax=vis_params[vis_type]['vmax'])
mesh.textures = TexturesVertex(colours)
res = renderer(mesh, R, T, render_normals=False, render_sil=False) # render mesh
# add to plot
vis_elements.append([res['rgb'][n] for n in range(nviews)])
grid = produce_grid(vis_elements)
gridH, gridW, _ = grid.shape
left_size = gridH // 8 # size of left padding in pix
right_size = gridH // 8 # right size padding for colourbar
out = np.zeros((gridH, left_size + gridW + right_size, 3), dtype=np.uint8)
out[:, left_size:-right_size] = grid
# write row names
row_names = 'Chamf\nGT', 'Chamf\nFIND', 'Norm\nGT', 'Norm\nFIND'
for i in range(4):
out = put_text(out, row_names[i],
x=0, y=int(gridH*i/4), width=int(left_size), height=int(gridH//4), scale=left_size / 100,
vertical=True)
# add colourbars
# width, height, colours, points = (0, 1), orientation = 'vertical'
cW, cH = right_size//2, int(gridH*0.3)
cbar_x = left_size + gridW + (right_size - cW) // 2
cbar_ys = [int(0.1 * gridH), int(0.6*gridH)]
for key, y in zip(['cham', 'cham_norm'], cbar_ys):
params = vis_params[key] | cbar = colourbar(cW, cH, colours=((255, 0, 0), (0, 255, 0))) | 6 | 2023-10-24 11:46:42+00:00 | 12k |
RobertCsordas/moe | tasks/simple/language_model/enwik8_transformer.py | [
{
"identifier": "TransformerLanguageModel",
"path": "models/transformer_language_model.py",
"snippet": "class TransformerLanguageModel(LoggingLayer, torch.nn.Module):\n def __init__(self, voc_size: int, embedding_size: Optional[int], state_size: int, dropout: float,\n tied_embedding: ... | import framework
import torch
import torch.nn
import torch.utils.data
import dataset
import random
from models import TransformerLanguageModel
from ... import task, args
from .transformer_lm_mixin import TransformerLMMixin
from ..simple_task import SimpleTask
from typing import Tuple, Any, Dict, List, Union
from interfaces import LanguageModelInterface | 9,241 |
@args
def a(parser: framework.helpers.ArgumentParser):
parser.add_argument("-lm.state_drop_probability", default=0.0)
parser.add_argument("-lm.lstm_weight_drop", default=0.0)
parser.add_argument("-lm.unroll", default=100)
parser.add_argument("-lm.unroll_eval", default="none", parser=parser.int_or_none_parser)
parser.add_argument("-lm.example_context", default=100)
parser.add_argument("-lm.example_window", default=40)
@task()
|
@args
def a(parser: framework.helpers.ArgumentParser):
parser.add_argument("-lm.state_drop_probability", default=0.0)
parser.add_argument("-lm.lstm_weight_drop", default=0.0)
parser.add_argument("-lm.unroll", default=100)
parser.add_argument("-lm.unroll_eval", default="none", parser=parser.int_or_none_parser)
parser.add_argument("-lm.example_context", default=100)
parser.add_argument("-lm.example_window", default=40)
@task() | class Enwik8Transformer(TransformerLMMixin, SimpleTask): | 3 | 2023-10-16 11:26:45+00:00 | 12k |
blackgold3/SemanticBoost | mdm/model_util.py | [
{
"identifier": "MDM",
"path": "mdm/model/mdm.py",
"snippet": "class MDM(nn.Module):\n def __init__(self, njoints, nfeats, latent_dim=256, ff_size=1024, num_layers=8, num_heads=4, dropout=0.1,\n activation=\"gelu\", dataset='amass', clip_dim=512,\n arch='trans_enc', cl... | from mdm.model.mdm import MDM
from mdm.diffusion import gaussian_diffusion as gd
from mdm.diffusion.respace import SpacedDiffusion, space_timesteps, InpaintingGaussianDiffusion
from mdm.model.trt_model import TRT_MDM | 7,562 |
def load_model_wo_clip(model, state_dict):
print("load model checkpoints without clip")
try:
new_state_dict = {}
for key, value in state_dict.items():
if "in_proj" in key:
keyq = key.replace("in_proj_weight", "wq.weight")
keyk = key.replace("in_proj_weight", "wk.weight")
keyv = key.replace("in_proj_weight", "wv.weight")
inshape = value.shape[0] // 3
valueq = value[:inshape]
valuek = value[inshape:inshape * 2]
valuev = value[inshape * 2:]
new_state_dict[keyq] = valueq
new_state_dict[keyk] = valuek
new_state_dict[keyv] = valuev
elif "out_proj" in key:
newkey = key.replace("out_proj", "wo")
new_state_dict[newkey] = value
else:
new_state_dict[key] = value
missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
except:
missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
print(unexpected_keys)
other_miss = []
for key in missing_keys:
if not key.startswith('clip_model.'):
other_miss.append(key)
print(other_miss)
assert all([k.startswith('clip_model.') for k in missing_keys])
def load_ft_model_wo_clip(model, state_dict):
print("load model checkpoints without clip")
missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
print(unexpected_keys)
# for name, value in model.named_parameters():
# if "seqTransEncoder" in name and "self_attn" in name:
# value.requires_grad = False
# if name.startswith("code_full") or name.startswith("encode_compress") or name.startswith("input_process"):
# value.requires_grad = False
assert all([k.startswith('clip_pose_encoder.') for k in unexpected_keys])
# assert all([k.startswith('clip_model.') or k.startswith('clip_pose_encoder.') or k.startswith('embed_text.') for k in missing_keys])
def create_model_and_diffusion(args, mode="text", json_dict=None):
model = MDM(**get_model_args(args), json_dict=json_dict)
diffusion = create_gaussian_diffusion(args, mode)
return model, diffusion
def create_trt_model(args, model, mode="text", json_dict=None, device="cuda"):
model = TRT_MDM(model, json_dict, device=device)
diffusion = create_gaussian_diffusion(args, mode)
return model, diffusion
def get_model_args(args):
# default args
clip_version = 'ViT-B/32'
if args.unconstrained:
cond_mode = 'no_cond'
elif args.dataset in ['kit', 'humanml']:
cond_mode = "text"
activation = args.trans_activate if args.arch != "trans_enc" else "gelu"
if args.dataset == 'humanml':
njoints = 263
nfeats = 1
elif args.dataset == 'kit':
njoints = 251
nfeats = 1
if args.rep == "smr":
njoints += 6
nfeats = 1
return {'njoints': njoints, 'nfeats': nfeats, 'latent_dim': args.latent_dim, 'ff_size': args.ff_size, 'num_layers': args.layers, 'num_heads': args.heads,
'dropout': 0.1, 'activation': activation, 'cond_mode': cond_mode, 'cond_mask_prob': args.cond_mask_prob, 'arch': args.arch,
'clip_version': clip_version, 'dataset': args.dataset, "local":args.local, "encode_full":args.encode_full, "txt_tokens":args.txt_tokens,
"dataset_path":args.dataset_path, "num_frames":args.num_frames, "conv_bias":args.conv_bias, "conv_activate":args.conv_activate,
"conv_norm":args.conv_norm}
def create_gaussian_diffusion(args, mode="text"):
# default params
predict_xstart = True # we always predict x_start (a.k.a. x0), that's our deal!
steps = 1000
scale_beta = 1. # no scaling
timestep_respacing = '' # can be used for ddim sampling, we don't use it.
learn_sigma = False
rescale_timesteps = False
betas = gd.get_named_beta_schedule(args.noise_schedule, steps, scale_beta)
loss_type = gd.LossType.MSE
if not timestep_respacing:
timestep_respacing = [steps]
if mode is not None and (mode.startswith("finetune_control") or mode == "control_length"):
print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> inpainting diffusion model")
|
def load_model_wo_clip(model, state_dict):
print("load model checkpoints without clip")
try:
new_state_dict = {}
for key, value in state_dict.items():
if "in_proj" in key:
keyq = key.replace("in_proj_weight", "wq.weight")
keyk = key.replace("in_proj_weight", "wk.weight")
keyv = key.replace("in_proj_weight", "wv.weight")
inshape = value.shape[0] // 3
valueq = value[:inshape]
valuek = value[inshape:inshape * 2]
valuev = value[inshape * 2:]
new_state_dict[keyq] = valueq
new_state_dict[keyk] = valuek
new_state_dict[keyv] = valuev
elif "out_proj" in key:
newkey = key.replace("out_proj", "wo")
new_state_dict[newkey] = value
else:
new_state_dict[key] = value
missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
except:
missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
print(unexpected_keys)
other_miss = []
for key in missing_keys:
if not key.startswith('clip_model.'):
other_miss.append(key)
print(other_miss)
assert all([k.startswith('clip_model.') for k in missing_keys])
def load_ft_model_wo_clip(model, state_dict):
print("load model checkpoints without clip")
missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
print(unexpected_keys)
# for name, value in model.named_parameters():
# if "seqTransEncoder" in name and "self_attn" in name:
# value.requires_grad = False
# if name.startswith("code_full") or name.startswith("encode_compress") or name.startswith("input_process"):
# value.requires_grad = False
assert all([k.startswith('clip_pose_encoder.') for k in unexpected_keys])
# assert all([k.startswith('clip_model.') or k.startswith('clip_pose_encoder.') or k.startswith('embed_text.') for k in missing_keys])
def create_model_and_diffusion(args, mode="text", json_dict=None):
model = MDM(**get_model_args(args), json_dict=json_dict)
diffusion = create_gaussian_diffusion(args, mode)
return model, diffusion
def create_trt_model(args, model, mode="text", json_dict=None, device="cuda"):
model = TRT_MDM(model, json_dict, device=device)
diffusion = create_gaussian_diffusion(args, mode)
return model, diffusion
def get_model_args(args):
# default args
clip_version = 'ViT-B/32'
if args.unconstrained:
cond_mode = 'no_cond'
elif args.dataset in ['kit', 'humanml']:
cond_mode = "text"
activation = args.trans_activate if args.arch != "trans_enc" else "gelu"
if args.dataset == 'humanml':
njoints = 263
nfeats = 1
elif args.dataset == 'kit':
njoints = 251
nfeats = 1
if args.rep == "smr":
njoints += 6
nfeats = 1
return {'njoints': njoints, 'nfeats': nfeats, 'latent_dim': args.latent_dim, 'ff_size': args.ff_size, 'num_layers': args.layers, 'num_heads': args.heads,
'dropout': 0.1, 'activation': activation, 'cond_mode': cond_mode, 'cond_mask_prob': args.cond_mask_prob, 'arch': args.arch,
'clip_version': clip_version, 'dataset': args.dataset, "local":args.local, "encode_full":args.encode_full, "txt_tokens":args.txt_tokens,
"dataset_path":args.dataset_path, "num_frames":args.num_frames, "conv_bias":args.conv_bias, "conv_activate":args.conv_activate,
"conv_norm":args.conv_norm}
def create_gaussian_diffusion(args, mode="text"):
    """Build the (spaced) Gaussian diffusion process for training/sampling.

    Only `args.noise_schedule` is read here; `mode` selects the inpainting
    diffusion variant for the control fine-tuning tasks.
    """
    # default params
    predict_xstart = True  # we always predict x_start (a.k.a. x0), that's our deal!
    steps = 1000           # number of diffusion timesteps
    scale_beta = 1.        # no scaling
    timestep_respacing = ''  # can be used for ddim sampling, we don't use it.
    learn_sigma = False
    rescale_timesteps = False

    betas = gd.get_named_beta_schedule(args.noise_schedule, steps, scale_beta)
    loss_type = gd.LossType.MSE  # plain MSE training objective

    # Empty respacing string means "use all timesteps".
    if not timestep_respacing:
        timestep_respacing = [steps]

    # Control fine-tuning / length control run with the inpainting diffusion.
    if mode is not None and (mode.startswith("finetune_control") or mode == "control_length"):
        print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> inpainting diffusion model")
pythonlessons/FinRock | experiments/testing_ppo_sinusoid.py | [
{
"identifier": "PdDataFeeder",
"path": "finrock/data_feeder.py",
"snippet": "class PdDataFeeder:\n def __init__(\n self, \n df: pd.DataFrame,\n indicators: list = [],\n min: float = None,\n max: float = None\n ) -> None:\n self... | import numpy as np
import pandas as pd
import tensorflow as tf
from finrock.data_feeder import PdDataFeeder
from finrock.trading_env import TradingEnv
from finrock.render import PygameRender
from finrock.scalers import MinMaxScaler
from finrock.reward import simpleReward
from finrock.metrics import DifferentActions, AccountValue, MaxDrawdown, SharpeRatio
from finrock.indicators import BolingerBands, RSI, PSAR, SMA | 9,010 | tf.get_logger().setLevel('ERROR')
for gpu in tf.config.experimental.list_physical_devices('GPU'):
tf.config.experimental.set_memory_growth(gpu, True)
df = pd.read_csv('Datasets/random_sinusoid.csv')
df = df[-1000:]
pd_data_feeder = PdDataFeeder(
df,
indicators = [
BolingerBands(data=df, period=20, std=2),
RSI(data=df, period=14),
PSAR(data=df),
SMA(data=df, period=7),
SMA(data=df, period=25),
SMA(data=df, period=99),
]
)
env = TradingEnv(
data_feeder = pd_data_feeder,
| tf.get_logger().setLevel('ERROR')
for gpu in tf.config.experimental.list_physical_devices('GPU'):
tf.config.experimental.set_memory_growth(gpu, True)
df = pd.read_csv('Datasets/random_sinusoid.csv')
df = df[-1000:]
pd_data_feeder = PdDataFeeder(
df,
indicators = [
BolingerBands(data=df, period=20, std=2),
RSI(data=df, period=14),
PSAR(data=df),
SMA(data=df, period=7),
SMA(data=df, period=25),
SMA(data=df, period=99),
]
)
env = TradingEnv(
data_feeder = pd_data_feeder, | output_transformer = MinMaxScaler(min=pd_data_feeder.min, max=pd_data_feeder.max), | 3 | 2023-10-23 07:44:54+00:00 | 12k |
hitlic/deepepochs | deepepochs/trainer.py | [
{
"identifier": "StopLoopException",
"path": "deepepochs/loops.py",
"snippet": "class StopLoopException(Exception):\r\n pass\r"
},
{
"identifier": "LoopException",
"path": "deepepochs/loops.py",
"snippet": "class LoopException(Exception):\r\n pass\r"
},
{
"identifier": "Ten... | import math
import time
import torch
from datetime import datetime
from collections import defaultdict
from typing import List, Dict, Callable
from torch.optim import Adam
from torch.utils.data import DataLoader
from accelerate import Accelerator
from .loops import (StopLoopException, LoopException, TensorTuple,
flatten_dict, default_loss, concat_dicts, to_numpy, listify, batch_size, concat, detach_clone)
from .tools import batches
from .optimizer import Optimizer, Optimizers
from .patches import PatchBase, MeanPatch, TensorPatch, run_patch_dict, run_patch_dicts
from .callbacks import CallbackPool, DefaultCallback, CallbackException
from tqdm import tqdm
| 9,134 | batch_y_4cbk = batch_y
if self.do_loss:
loss_4cbk = self.loss_fn(model_out, batch_y)
else:
loss_4cbk = None
self.trainer.callbacks.trigger(f'{self.stage}_metrics', trainer=self.trainer, loss=loss_4cbk, model_out=model_out_4cbk, batch_y=batch_y_4cbk, task=self.task)
return loss_4cbk
class TrainerBase:
def __init__(self, model,
loss=None,
opt=None,
epochs=1000,
device=None,
callbacks=None,
metrics=None,
metric_patch:['mean', 'tensor']='tensor',
resume=False,
running_id=None,
hyper_params=None,
log_long=False,
log_batch=True,
log_tqdm=False,
show_info=True,
compile_model=False,
grad_accumulate_steps=1,
):
"""
Args:
model: Pytorch模型(nn.Module)
loss: 损失函数
opt: 优化器,或优化器列表;优化器是Pytorch优化器或deepepochs.Optimizer对象
epochs [int]: 迭代次数
device [str]: 加速设备,可取值包括
- cpu、cuda、mps等Pytorch支持的设备
- Accelerator对象,利用Hugging Face Accelerate实现多机多卡或混合精度训练
callbacks [List[Callback]]: Callback或Callback列表。
metrics [Callable]: 指标函数列表;通用于训练、验证和测试。
metric_patch [PatchBase]: 封装metrics所用的Patch类型,可选项为 mean 或 tensor
resume [bool, int, str]: 是否从logs文件平中的Checkpoint加载
- False表示不加载
- True表示从最新的Checkpoint加载
- int、str表示加载相应ID的Checkpoint
running_id [int, str, None]: 当前训练的运行编号,用于指定日志和checkpoint的文件夹名
hyper_params [dict, None]: 调参所关注的重要超参数,用于写入日志文件辅助调参
log_long [bool]: 指标输出为长格式(7位小数)还是短格式(4位小数)
log_batch [bool]: 训练过程中是否每个batch输出一次指标值
log_tqdm [bool]: 是否使用tqdm显示进度
compile_model [bool]: 利用PyTorch 2.x对模型compile以提升速度(暂不支持mps、Windows [v2.1])
grad_accumulate_steps [int]: 累积梯度更新时的累积次数,大于1表示启用累积梯度更新
"""
self.show_info = show_info
# 检测与配置加速设备
if device is not None:
self.device = device
elif torch.cuda.is_available():
self.device = 'cuda'
elif torch.backends.mps.is_available() and not compile_model:
self.device = 'mps'
else:
self.device = 'cpu'
# Pytorch支持的设备类型
device_types = ['cpu', 'cuda', 'ipu', 'xpu', 'mkldnn', 'opengl', 'opencl',
'ideep', 'hip', 've', 'fpga', 'ort', 'xla', 'lazy', 'vulkan',
'mps', 'meta', 'hpu', 'mtia', 'privateuseone']
# 使用Accelerate,用于实现分布式或混合精度训练
if isinstance(self.device, Accelerator):
self.accelerator = self.device
self.device = self.accelerator.device
self.main_process = self.accelerator.is_main_process # 是否主进程
else:
assert str(self.device).split(':', maxsplit=1)[0] in device_types, f'Pytorch不支持的{self.device}设备!\nPytorch支持的设备有:{device_types}'
self.accelerator = None
self.main_process = True
# 配置模型
if compile_model:
model = torch.compile(model)
self.model = ModelWrapper(model, self).to(self.device)
# 梯度累积次数
assert isinstance(grad_accumulate_steps, int) and grad_accumulate_steps > 0, '梯度累积次数`grad_accumulate_steps`必须为正整数!'
self.grad_accumulate_steps = grad_accumulate_steps
if self.accelerator is not None and self.accelerator.gradient_accumulation_steps > 1:
# 优先使用accelerator中的gradient_accumulation_steps
self.grad_accumulate_steps = self.accelerator.gradient_accumulation_steps
# 配置损失函数
if loss is None:
self.loss = LossWrapper(default_loss, self)
else:
self.loss = LossWrapper(loss, self)
# 配置优化器
if opt is None:
self.opt = Optimizer(Adam(model.parameters(), lr=0.001))
elif isinstance(opt, torch.optim.Optimizer):
self.opt = Optimizer(opt)
elif isinstance(opt, (Optimizer, Optimizers)): # Optimizers是多个Optimizer的列表
self.opt = opt
elif isinstance(opt, (list, tuple)): # 多个优化器的情况
opt_lst = [Optimizer(o) if isinstance(o, torch.optim.Optimizer) else o for o in opt]
assert all(isinstance(o, Optimizer) for o in opt_lst), "优化器参数存在错误!"
self.opt = Optimizers(opt_lst)
else:
raise ValueError('`opt`参数取值错误!')
# 迭代次数
self.max_epochs = epochs
# 起始迭代
self.init_epoch = 0
# 配置Callbacks
callbacks = listify(callbacks)
self.log_tqdm = log_tqdm
log_batch = False if log_tqdm else log_batch
| """
@author: liuchen
"""
class EpochTask:
    """A single-epoch training, validation or test task."""
    def __init__(self, dataloader, metrics=None, do_loss=True, **step_args):
        """
        Args:
            dataloader: pytorch Dataloader
            metrics: list of metric functions
            do_loss: whether to compute the loss during validation and testing
            step_args: extra keyword arguments forwarded to the `step`, `train_step`,
                `val_step`, `test_step` and `evaluate` methods
        """
        self.dataloader = dataloader
        self.batchs = len(dataloader)
        self.metrics = listify(metrics)
        self.do_loss = do_loss
        self.trainer = None   # attached later by the trainer that runs this task
        self.stage = None     # one of 'train' / 'val' / 'test'
        self.val_freq = None
        self.step_args = step_args
        self.batch_patch_dict = {}  # injected by the on_train/val/test_prediction callbacks of DefaultCallback
    def __len__(self):
        return self.batchs
    def __getattr__(self, name):
        """If an attribute or method is not found here, look it up on the trainer."""
        return getattr(self.trainer, name, None)
    def __call__(self):
        # 'train' uses training-specific step methods; 'val'/'test' share the evaluate path.
        phase = 'train' if self.stage=='train' else 'evaluate'
        if self.stage == 'train':
            self.model.train()
        else:
            self.model.eval()
        self.model.stage = self.stage
        self.loss.stage = self.stage
        self.loss.do_loss = self.do_loss
        self.loss.task = self
        # Configure metrics; used in DefaultCallback's on_train/val/test_prediction to build Patches.
        # Stage-specific metrics are appended after (and deduplicated against) the task's own metrics.
        if self.stage == 'train':
            self.metrics = [m for m in self.metrics if m not in self.train_metrics] + self.train_metrics
        elif self.stage == 'val':
            self.metrics = [m for m in self.metrics if m not in self.val_metrics] + self.val_metrics
        else:
            self.metrics = [m for m in self.metrics if m not in self.test_metrics] + self.test_metrics
        # Gradients are disabled globally; the train step re-enables them locally below.
        with torch.no_grad():
            self.callbacks.trigger(f'before_{self.stage}_epoch', trainer=self, task=self)
            epoch_patch_dicts = []
            for batch_idx, batch_data in enumerate(self.dataloader):
                batch_x, batch_y = self.prepare_data(batch_data)
                self.callbacks.trigger(f'before_{self.stage}_batch', trainer=self.trainer, batch_x=batch_x, batch_y=batch_y, batch_idx=batch_idx)
                # Resolve the mini-batch `*step` method, most specific first:
                # 1. `EpochTask.step` / `Trainer.step`
                step_method = getattr(self, 'step', None)
                # 2. `EpochTask.train_step` / `EpochTask.val_step` / `EpochTask.test_step`
                # 3. then `Trainer.train_step` / `Trainer.val_step` / `Trainer.test_step`
                step_method = getattr(self, f'{self.stage}_step') if step_method is None else step_method
                # 4. then `EpochTask.evaluate_step`
                # 5. finally `Trainer.evaluate_step`
                step_method = getattr(self, f'{phase}_step') if step_method is None else step_method
                # Run the resolved `*step` method on the mini-batch.
                if self.stage == 'train':
                    with torch.enable_grad():
                        step_out = step_method(batch_x, batch_y, **self.step_args)
                else:
                    step_out = step_method(batch_x, batch_y, **self.step_args)
                # A step may return a dict of Patches to merge into this batch's patch dict.
                if step_out is not None:
                    if not isinstance(step_out, dict):
                        raise LoopException(f'{step_method} 方法的返回值必须为字典!')
                    if not all(isinstance(v, PatchBase) for k, v in step_out.items()):
                        raise LoopException(f'{step_method} 方法返回字典的value必须为Patch(deepepochs.PatchBase子类对象)!')
                    patch_dict = step_out
                else:
                    patch_dict = {}
                self.batch_patch_dict.update(patch_dict)
                epoch_patch_dicts.append(self.batch_patch_dict)
                # Compute the metrics of the current batch.
                batch_metric_values = flatten_dict(run_patch_dict(self.batch_patch_dict), sep='')
                self.callbacks.trigger(f'after_{self.stage}_batch', trainer=self.trainer, metrics=batch_metric_values, batch_idx=batch_idx)
                # Reset self.batch_patch_dict for the next batch.
                self.batch_patch_dict = {}
            # Compute the metrics of the whole epoch by folding all batch patch dicts.
            epoch_metrics_values = flatten_dict(run_patch_dicts(epoch_patch_dicts), sep='')
            self.callbacks.trigger(f'after_{self.stage}_epoch', trainer=self.trainer, task=self, metrics=epoch_metrics_values)
            return epoch_metrics_values
class ModelWrapper:
    """Thin proxy around the user model that fires the forward-pass callbacks:

    before_train_forward / after_train_forward
    before_val_forward   / after_val_forward
    before_test_forward  / after_test_forward

    Any attribute not defined here is transparently delegated to the wrapped model.
    """
    def __init__(self, model, trainer):
        self.model = model
        self.trainer = trainer
        self.stage = None  # set to 'train'/'val'/'test' by the running EpochTask

    def __getattr__(self, name):
        # Invoked only when normal lookup fails: fall through to the wrapped model.
        return getattr(self.model, name)

    def __call__(self, *args, **kwargs):
        trigger = self.trainer.callbacks.trigger
        trigger(f'before_{self.stage}_forward', trainer=self)
        output = self.model(*args, **kwargs)
        trigger(f'after_{self.stage}_forward', trainer=self, model_out=output)
        return output

    def train(self):
        self.model.train()

    def eval(self):
        self.model.eval()

    def to(self, device):
        self.model = self.model.to(device)
        return self

    def cpu(self):
        self.model = self.model.cpu()
        return self

    def cuda(self):
        self.model = self.model.cuda()
        return self

    def parameters(self):
        return self.model.parameters()

    def modules(self):
        return self.model.modules()

    def state_dict(self):
        return self.model.state_dict()

    def load_state_dict(self, state_dict):
        self.model.load_state_dict(state_dict)
class LossWrapper:
    """
    1. Automatically performs zero_grad, backward and opt.step around the loss.
    2. Cooperates with gradient accumulation.
    3. Fires the callbacks:
       before_backward  after_backward
       before_optimize  after_optimize
       train_metrics  val_metrics  test_metrics
    """
    def __init__(self, loss_fn, trainer):
        self.loss_fn = loss_fn
        self.trainer = trainer
        self.stage = None     # 'train' / 'val' / 'test', set by EpochTask
        self.do_loss = None   # whether to compute loss in val/test, set by EpochTask
        self.task = None      # the EpochTask currently using this wrapper
        self.total_loss = 0   # running sample-weighted loss, for gradient accumulation
        self.model_outs = []  # model outputs of the accumulated sub-batches
        self.batch_ys = []    # labels of the accumulated sub-batches
    def optimize(self):
        # One optimizer step, bracketed by the optimize callbacks.
        self.trainer.callbacks.trigger('before_optimize', trainer=self)
        self.trainer.opt.step()
        self.trainer.opt.zero_grad()
        self.trainer.callbacks.trigger('after_optimize', trainer=self)
    def __call__(self, model_out, batch_y, grad_accumulate=False):
        """
        Args:
            model_out: model predictions
            batch_y: labels
            grad_accumulate: whether to accumulate gradients instead of stepping now
        """
        if self.stage == 'train':
            # Compute the loss.
            loss = self.loss_fn(model_out, batch_y)
            # Backward pass (loss is scaled so accumulated gradients average correctly).
            self.trainer.callbacks.trigger('before_backward', trainer=self, loss=loss)
            if self.trainer.accelerator is None:
                (loss/self.trainer.grad_accumulate_steps).backward()
            else:  # backward through Hugging Face Accelerate
                self.trainer.accelerator.backward(loss/self.trainer.grad_accumulate_steps)
            self.trainer.callbacks.trigger('after_backward', trainer=self, loss=loss)
            # Record the sub-batch total loss, model outputs and labels.
            _loss = loss.detach().clone()
            self.total_loss += _loss * batch_size(model_out)
            self.model_outs.append(detach_clone(model_out))
            self.batch_ys.append(batch_y)
            # Gradient accumulation.
            if grad_accumulate:
                if self.trainer.accelerator is not None:  # DeepEpochs itself optimizes only on the final sub-batch,
                    self.optimize()                        # but Accelerate expects an optimize on every sub-batch.
                return _loss
            else:
                self.optimize()
            # Average the accumulated loss; concatenate the sub-batch model outputs and labels.
            loss_4cbk = self.total_loss / sum(batch_size(o) for o in self.model_outs)
            model_out_4cbk = concat(self.model_outs)
            batch_y_4cbk = concat(self.batch_ys)
            self.total_loss = 0
            self.model_outs = []
            self.batch_ys = []
        else:
            # Validation and testing need no sub-batching; use a smaller batch_size if needed.
            model_out_4cbk = model_out
            batch_y_4cbk = batch_y
            if self.do_loss:
                loss_4cbk = self.loss_fn(model_out, batch_y)
            else:
                loss_4cbk = None
        self.trainer.callbacks.trigger(f'{self.stage}_metrics', trainer=self.trainer, loss=loss_4cbk, model_out=model_out_4cbk, batch_y=batch_y_4cbk, task=self.task)
        return loss_4cbk
class TrainerBase:
def __init__(self, model,
loss=None,
opt=None,
epochs=1000,
device=None,
callbacks=None,
metrics=None,
metric_patch:['mean', 'tensor']='tensor',
resume=False,
running_id=None,
hyper_params=None,
log_long=False,
log_batch=True,
log_tqdm=False,
show_info=True,
compile_model=False,
grad_accumulate_steps=1,
):
"""
Args:
model: Pytorch模型(nn.Module)
loss: 损失函数
opt: 优化器,或优化器列表;优化器是Pytorch优化器或deepepochs.Optimizer对象
epochs [int]: 迭代次数
device [str]: 加速设备,可取值包括
- cpu、cuda、mps等Pytorch支持的设备
- Accelerator对象,利用Hugging Face Accelerate实现多机多卡或混合精度训练
callbacks [List[Callback]]: Callback或Callback列表。
metrics [Callable]: 指标函数列表;通用于训练、验证和测试。
metric_patch [PatchBase]: 封装metrics所用的Patch类型,可选项为 mean 或 tensor
resume [bool, int, str]: 是否从logs文件平中的Checkpoint加载
- False表示不加载
- True表示从最新的Checkpoint加载
- int、str表示加载相应ID的Checkpoint
running_id [int, str, None]: 当前训练的运行编号,用于指定日志和checkpoint的文件夹名
hyper_params [dict, None]: 调参所关注的重要超参数,用于写入日志文件辅助调参
log_long [bool]: 指标输出为长格式(7位小数)还是短格式(4位小数)
log_batch [bool]: 训练过程中是否每个batch输出一次指标值
log_tqdm [bool]: 是否使用tqdm显示进度
compile_model [bool]: 利用PyTorch 2.x对模型compile以提升速度(暂不支持mps、Windows [v2.1])
grad_accumulate_steps [int]: 累积梯度更新时的累积次数,大于1表示启用累积梯度更新
"""
self.show_info = show_info
# 检测与配置加速设备
if device is not None:
self.device = device
elif torch.cuda.is_available():
self.device = 'cuda'
elif torch.backends.mps.is_available() and not compile_model:
self.device = 'mps'
else:
self.device = 'cpu'
# Pytorch支持的设备类型
device_types = ['cpu', 'cuda', 'ipu', 'xpu', 'mkldnn', 'opengl', 'opencl',
'ideep', 'hip', 've', 'fpga', 'ort', 'xla', 'lazy', 'vulkan',
'mps', 'meta', 'hpu', 'mtia', 'privateuseone']
# 使用Accelerate,用于实现分布式或混合精度训练
if isinstance(self.device, Accelerator):
self.accelerator = self.device
self.device = self.accelerator.device
self.main_process = self.accelerator.is_main_process # 是否主进程
else:
assert str(self.device).split(':', maxsplit=1)[0] in device_types, f'Pytorch不支持的{self.device}设备!\nPytorch支持的设备有:{device_types}'
self.accelerator = None
self.main_process = True
# 配置模型
if compile_model:
model = torch.compile(model)
self.model = ModelWrapper(model, self).to(self.device)
# 梯度累积次数
assert isinstance(grad_accumulate_steps, int) and grad_accumulate_steps > 0, '梯度累积次数`grad_accumulate_steps`必须为正整数!'
self.grad_accumulate_steps = grad_accumulate_steps
if self.accelerator is not None and self.accelerator.gradient_accumulation_steps > 1:
# 优先使用accelerator中的gradient_accumulation_steps
self.grad_accumulate_steps = self.accelerator.gradient_accumulation_steps
# 配置损失函数
if loss is None:
self.loss = LossWrapper(default_loss, self)
else:
self.loss = LossWrapper(loss, self)
# 配置优化器
if opt is None:
self.opt = Optimizer(Adam(model.parameters(), lr=0.001))
elif isinstance(opt, torch.optim.Optimizer):
self.opt = Optimizer(opt)
elif isinstance(opt, (Optimizer, Optimizers)): # Optimizers是多个Optimizer的列表
self.opt = opt
elif isinstance(opt, (list, tuple)): # 多个优化器的情况
opt_lst = [Optimizer(o) if isinstance(o, torch.optim.Optimizer) else o for o in opt]
assert all(isinstance(o, Optimizer) for o in opt_lst), "优化器参数存在错误!"
self.opt = Optimizers(opt_lst)
else:
raise ValueError('`opt`参数取值错误!')
# 迭代次数
self.max_epochs = epochs
# 起始迭代
self.init_epoch = 0
# 配置Callbacks
callbacks = listify(callbacks)
self.log_tqdm = log_tqdm
log_batch = False if log_tqdm else log_batch
| self.default_cbk = DefaultCallback(log_long, log_batch, log_tqdm)
| 21 | 2023-10-19 05:41:48+00:00 | 12k |
vorausrobotik/voraus-ad-dataset | train.py | [
{
"identifier": "Configuration",
"path": "configuration.py",
"snippet": "class Configuration(BaseModel):\n \"\"\"Describes the configuration parameters.\"\"\"\n\n seed: int\n epochs: int\n batchsize: int\n n_hidden_layers: int = Field(alias=\"nHiddenLayers\")\n n_coupling_blocks: int =... | import random
import numpy
import pandas
import torch
import torch.backends.cudnn
from pathlib import Path
from typing import Dict, List, Optional
from sklearn import metrics
from torch import optim
from configuration import Configuration
from normalizing_flow import NormalizingFlow, get_loss, get_loss_per_sample
from voraus_ad import ANOMALY_CATEGORIES, Signals, load_torch_dataloaders | 7,694 | """Contains the training of the normalizing flow model."""
# If deterministic CUDA is activated, some calculations cannot be calculated in parallel on the GPU.
# The training will take much longer but is reproducible.
DETERMINISTIC_CUDA = False
DATASET_PATH = Path.home() / "Downloads" / "voraus-ad-dataset-100hz.parquet"
MODEL_PATH: Optional[Path] = Path.cwd() / "model.pth"
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Define the training configuration and hyperparameters of the model.
configuration = Configuration(
columns="machine",
epochs=70,
frequencyDivider=1,
trainGain=1.0,
seed=177,
batchsize=32,
nCouplingBlocks=4,
clamp=1.2,
learningRate=8e-4,
normalize=True,
pad=True,
nHiddenLayers=0,
scale=2,
kernelSize1=13,
dilation1=2,
kernelSize2=1,
dilation2=1,
kernelSize3=1,
dilation3=1,
milestones=[11, 61],
gamma=0.1,
)
# Make the training reproducible.
torch.manual_seed(configuration.seed)
torch.cuda.manual_seed_all(configuration.seed)
numpy.random.seed(configuration.seed)
random.seed(configuration.seed)
if DETERMINISTIC_CUDA:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Disable pylint too-many-variables here for readability.
# The whole training should run in a single function call.
def train() -> List[Dict]: # pylint: disable=too-many-locals
"""Trains the model with the paper-given parameters.
Returns:
The auroc (mean over categories) and loss per epoch.
"""
# Load the dataset as torch data loaders.
train_dataset, _, train_dl, test_dl = load_torch_dataloaders(
dataset=DATASET_PATH,
batch_size=configuration.batchsize,
columns=Signals.groups()[configuration.columns],
seed=configuration.seed,
frequency_divider=configuration.frequency_divider,
train_gain=configuration.train_gain,
normalize=configuration.normalize,
pad=configuration.pad,
)
# Retrieve the shape of the data for the model initialization.
n_signals = train_dataset.tensors[0].shape[1]
n_times = train_dataset.tensors[0].shape[0]
# Initialize the model, optimizer and scheduler.
model = NormalizingFlow((n_signals, n_times), configuration).float().to(DEVICE)
optimizer = torch.optim.Adam(model.parameters(), lr=configuration.learning_rate)
scheduler = optim.lr_scheduler.MultiStepLR(
optimizer, milestones=configuration.milestones, gamma=configuration.gamma
)
training_results: List[Dict] = []
# Iterate over all epochs.
for epoch in range(configuration.epochs):
# TRAIN THE MODEL.
model.train()
loss: float = 0
for tensors, _ in train_dl:
tensors = tensors.float().to(DEVICE)
# Execute the forward and jacobian calculation.
optimizer.zero_grad()
latent_z, jacobian = model.forward(tensors.transpose(2, 1))
jacobian = torch.sum(jacobian, dim=tuple(range(1, jacobian.dim())))
# Back propagation and loss calculation.
| """Contains the training of the normalizing flow model."""
# If deterministic CUDA is activated, some calculations cannot be calculated in parallel on the GPU.
# The training will take much longer but is reproducible.
DETERMINISTIC_CUDA = False
DATASET_PATH = Path.home() / "Downloads" / "voraus-ad-dataset-100hz.parquet"
MODEL_PATH: Optional[Path] = Path.cwd() / "model.pth"
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Define the training configuration and hyperparameters of the model.
configuration = Configuration(
columns="machine",
epochs=70,
frequencyDivider=1,
trainGain=1.0,
seed=177,
batchsize=32,
nCouplingBlocks=4,
clamp=1.2,
learningRate=8e-4,
normalize=True,
pad=True,
nHiddenLayers=0,
scale=2,
kernelSize1=13,
dilation1=2,
kernelSize2=1,
dilation2=1,
kernelSize3=1,
dilation3=1,
milestones=[11, 61],
gamma=0.1,
)
# Make the training reproducible.
torch.manual_seed(configuration.seed)
torch.cuda.manual_seed_all(configuration.seed)
numpy.random.seed(configuration.seed)
random.seed(configuration.seed)
if DETERMINISTIC_CUDA:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Disable pylint too-many-variables here for readability.
# The whole training should run in a single function call.
def train() -> List[Dict]: # pylint: disable=too-many-locals
"""Trains the model with the paper-given parameters.
Returns:
The auroc (mean over categories) and loss per epoch.
"""
# Load the dataset as torch data loaders.
train_dataset, _, train_dl, test_dl = load_torch_dataloaders(
dataset=DATASET_PATH,
batch_size=configuration.batchsize,
columns=Signals.groups()[configuration.columns],
seed=configuration.seed,
frequency_divider=configuration.frequency_divider,
train_gain=configuration.train_gain,
normalize=configuration.normalize,
pad=configuration.pad,
)
# Retrieve the shape of the data for the model initialization.
n_signals = train_dataset.tensors[0].shape[1]
n_times = train_dataset.tensors[0].shape[0]
# Initialize the model, optimizer and scheduler.
model = NormalizingFlow((n_signals, n_times), configuration).float().to(DEVICE)
optimizer = torch.optim.Adam(model.parameters(), lr=configuration.learning_rate)
scheduler = optim.lr_scheduler.MultiStepLR(
optimizer, milestones=configuration.milestones, gamma=configuration.gamma
)
training_results: List[Dict] = []
# Iterate over all epochs.
for epoch in range(configuration.epochs):
# TRAIN THE MODEL.
model.train()
loss: float = 0
for tensors, _ in train_dl:
tensors = tensors.float().to(DEVICE)
# Execute the forward and jacobian calculation.
optimizer.zero_grad()
latent_z, jacobian = model.forward(tensors.transpose(2, 1))
jacobian = torch.sum(jacobian, dim=tuple(range(1, jacobian.dim())))
# Back propagation and loss calculation. | batch_loss = get_loss(latent_z, jacobian) | 2 | 2023-10-18 15:09:24+00:00 | 12k |
invictus717/UniDG | domainbed/scripts/visualize_adaption.py | [
{
"identifier": "datasets",
"path": "domainbed/datasets.py",
"snippet": "DATASETS = [\n # Debug\n \"Debug28\",\n \"Debug224\",\n # Small images\n \"ColoredMNIST\",\n \"RotatedMNIST\",\n # Big images\n \"VLCS\",\n \"PACS\",\n \"OfficeHome\",\n \"TerraIncognita\",\n \"D... | import argparse
import collections
import json
import os
import random
import sys
import time
import uuid
import itertools
import copy
import numpy as np
import PIL
import torch
import torchvision
import torch.utils.data
import itertools
import matplotlib.pyplot as plt
import numpy as np
from argparse import Namespace
from itertools import chain
from domainbed import datasets
from domainbed import hparams_registry
from domainbed import algorithms
from domainbed.lib import misc
from domainbed.lib.misc import accuracy_ent
from domainbed.lib.fast_data_loader import InfiniteDataLoader, FastDataLoader, DataParallelPassthrough
from domainbed import model_selection
from domainbed.lib.query import Q
from domainbed import adapt_algorithms
from MulticoreTSNE import MulticoreTSNE as TSNE | 7,900 | print("\tPython: {}".format(sys.version.split(" ")[0]))
print("\tPyTorch: {}".format(torch.__version__))
print("\tTorchvision: {}".format(torchvision.__version__))
print("\tCUDA: {}".format(torch.version.cuda))
print("\tCUDNN: {}".format(torch.backends.cudnn.version()))
print("\tNumPy: {}".format(np.__version__))
print("\tPIL: {}".format(PIL.__version__))
print('Args:')
for k, v in sorted(vars(args).items()):
print('\t{}: {}'.format(k, v))
if args.hparams_seed == 0:
hparams = hparams_registry.default_hparams(args.algorithm, args.dataset)
else:
hparams = hparams_registry.random_hparams(args.algorithm, args.dataset,
misc.seed_hash(args.hparams_seed, args.trial_seed))
if args.hparams:
hparams.update(json.loads(args.hparams))
print('HParams:')
for k, v in sorted(hparams.items()):
print('\t{}: {}'.format(k, v))
assert os.path.exists(os.path.join(args.output_dir, 'done'))
assert os.path.exists(os.path.join(args.output_dir, 'IID_best.pkl')) # IID_best is produced by train.py
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
if args.dataset in vars(datasets):
dataset = vars(datasets)[args.dataset](args.data_dir,
args.test_envs, hparams)
else:
raise NotImplementedError
# Split each env into an 'in-split' and an 'out-split'. We'll train on
# each in-split except the test envs, and evaluate on all splits.
# To allow unsupervised domain adaptation experiments, we split each test
# env into 'in-split', 'uda-split' and 'out-split'. The 'in-split' is used
# by collect_results.py to compute classification accuracies. The
# 'out-split' is used by the Oracle model selectino method. The unlabeled
# samples in 'uda-split' are passed to the algorithm at training time if
# args.task == "domain_adaptation". If we are interested in comparing
# domain generalization and domain adaptation results, then domain
# generalization algorithms should create the same 'uda-splits', which will
# be discared at training.
in_splits = []
out_splits = []
uda_splits = []
for env_i, env in enumerate(dataset):
uda = []
out, in_ = misc.split_dataset(env,
int(len(env)*args.holdout_fraction),
misc.seed_hash(args.trial_seed, env_i))
if env_i in args.test_envs:
uda, in_ = misc.split_dataset(in_,
int(len(in_)*args.uda_holdout_fraction),
misc.seed_hash(args.trial_seed, env_i))
if hparams['class_balanced']:
in_weights = misc.make_weights_for_balanced_classes(in_)
out_weights = misc.make_weights_for_balanced_classes(out)
if uda is not None:
uda_weights = misc.make_weights_for_balanced_classes(uda)
else:
in_weights, out_weights, uda_weights = None, None, None
in_splits.append((in_, in_weights))
out_splits.append((out, out_weights))
if len(uda):
uda_splits.append((uda, uda_weights))
# Use out splits as training data (to fair comparison with train.py)
train_loaders = [FastDataLoader(
dataset=env,
batch_size=hparams['batch_size'],
num_workers=dataset.N_WORKERS)
for i, (env, env_weights) in enumerate(out_splits)
if i in args.test_envs]
uda_loaders = [InfiniteDataLoader(
dataset=env,
weights=env_weights,
batch_size=hparams['batch_size'],
num_workers=dataset.N_WORKERS)
for i, (env, env_weights) in enumerate(uda_splits)
if i in args.test_envs]
eval_loaders = [FastDataLoader(
dataset=env,
batch_size=args.test_batch_size,
num_workers=dataset.N_WORKERS)
for env, _ in (in_splits + out_splits + uda_splits)]
eval_weights = [None for _, weights in (in_splits + out_splits + uda_splits)]
eval_loader_names = ['env{}_in'.format(i)
for i in range(len(in_splits))]
eval_loader_names += ['env{}_out'.format(i)
for i in range(len(out_splits))]
eval_loader_names += ['env{}_uda'.format(i)
for i in range(len(uda_splits))]
algorithm_class = algorithms.get_algorithm_class(args.algorithm)
algorithm = algorithm_class(dataset.input_shape, dataset.num_classes,
len(dataset) - len(args.test_envs), hparams)
if algorithm_dict is not None:
algorithm.load_state_dict(algorithm_dict)
algorithm.to(device)
if hasattr(algorithm, 'network'):
| # The code is modified from domainbed.scripts.train
def softmax_entropy(x: torch.Tensor) -> torch.Tensor:
    """Per-sample Shannon entropy of the softmax distribution over logits `x`.

    Args:
        x: logits of shape (batch, num_classes).

    Returns:
        1-D tensor of length `batch` with the entropy of each row.
    """
    probs = x.softmax(dim=1)
    log_probs = x.log_softmax(dim=1)
    return -(probs * log_probs).sum(dim=1)
class Dataset:
    """Minimal map-style dataset over two aligned sequences.

    Wraps pre-computed features `x` and targets `y` so they can be fed to a
    `torch.utils.data.DataLoader`.
    """
    def __init__(self, x, y):
        self.x = x  # inputs / cached representations
        self.y = y  # targets, index-aligned with `x`

    def __len__(self):
        return len(self.x)

    def __getitem__(self, idx):
        return self.x[idx], self.y[idx]
def generate_featurelized_loader(loader, network, classifier, batch_size=128):
    """Pre-compute feature representations for every sample in `loader`.

    Classifier adaptation does not need to repeat the heavy featurizer forward
    pass, so the experiments are sped up by caching each observation's
    representation once up front.

    Args:
        loader: source DataLoader yielding (x, y) batches.
        network: featurizer; its outputs are cached.
        classifier: head producing logits from the cached features.
        batch_size: batch size of the returned feature loaders.

    Returns:
        loader1: DataLoader over (feature, ground-truth label) pairs.
        loader2: DataLoader over (feature, pseudo-label) pairs.
        ent: per-sample softmax entropy of the classifier outputs.
    """
    feature_chunks, label_chunks, logit_chunks = [], [], []
    network.eval()
    classifier.eval()
    for x, y in loader:
        feats = network(x.to(device))
        logits = classifier(feats)
        feature_chunks.append(feats.detach().cpu())
        label_chunks.append(y.detach().cpu())
        logit_chunks.append(logits.detach().cpu())
    network.train()
    classifier.train()

    z = torch.cat(feature_chunks)
    y = torch.cat(label_chunks)
    p = torch.cat(logit_chunks)
    ent = softmax_entropy(p)
    pseudo_labels = p.argmax(1).float().cpu().detach()

    gt_loader = torch.utils.data.DataLoader(
        Dataset(z, y), batch_size=batch_size, shuffle=False, drop_last=True)
    pseudo_loader = torch.utils.data.DataLoader(
        Dataset(z, pseudo_labels), batch_size=batch_size, shuffle=False, drop_last=True)
    return gt_loader, pseudo_loader, ent
def visualize_tsne(network, loader, weights, device, adapt,env, name):
print("Start visualizing {}...".format(name))
if adapt:
flag = 'Adapted'
else:
flag = 'Base'
network.eval()
for x, y in loader:
x = x.to(device)
y = y.to(device)
if adapt is False:
p = network(x)
else:
p = network(x, adapt)
x = p.detach().cpu().numpy()
tsne = TSNE(n_components=2).fit_transform(x)
label = np.squeeze(y.cpu().numpy())
plt.figure(figsize=(6, 6))
size=100
line=0.7
t=.8
# plt.scatter(tsne[:, 0], tsne[:, 1], c=label,cmap=plt.get_cmap('hsv'),marker = 'o',linewidths=line,alpha=t,edgecolors='black')
plt.scatter(tsne[:, 0], tsne[:, 1], c=label,cmap=plt.get_cmap('terrain'),marker = 'o',linewidths=line,alpha=t,edgecolors='black')
plt.axis('off')
plt.colorbar()
plt.savefig('./visualization/vis_test_{}_{}_{}.jpg'.format(env,flag,name))
print("Visualization Results Saved...")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Domain generalization')
parser.add_argument('--input_dir', type=str)
parser.add_argument('--adapt_algorithm', type=str, default="UniDG")
args_in = parser.parse_args()
epochs_path = os.path.join(args_in.input_dir, 'results.jsonl')
records = []
with open(epochs_path, 'r') as f:
for line in f:
records.append(json.loads(line[:-1]))
records = Q(records)
r = records[0]
args = Namespace(**r['args'])
print(args)
args.input_dir = args_in.input_dir
if '-' in args_in.adapt_algorithm:
args.adapt_algorithm, test_batch_size = args_in.adapt_algorithm.split('-')
args.test_batch_size = int(test_batch_size)
else:
args.adapt_algorithm = args_in.adapt_algorithm
args.test_batch_size = 128 # default
args.test_batch_size = 128 # default
args.output_dir = args.input_dir
alg_name = args_in.adapt_algorithm
if args.adapt_algorithm in['T3A', 'TentPreBN', 'TentClf', 'PLClf']:
use_featurer_cache = True
else:
use_featurer_cache = False
if os.path.exists(os.path.join(args.output_dir, 'done_{}'.format(alg_name))):
print("{} has already excecuted".format(alg_name))
# If we ever want to implement checkpointing, just persist these values
# every once in a while, and then load them from disk here.
algorithm_dict = None
# os.makedirs(args.output_dir, exist_ok=True)
sys.stdout = misc.Tee(os.path.join(args.output_dir, 'out_{}.txt'.format(alg_name)))
sys.stderr = misc.Tee(os.path.join(args.output_dir, 'err_{}.txt'.format(alg_name)))
print("Environment:")
print("\tPython: {}".format(sys.version.split(" ")[0]))
print("\tPyTorch: {}".format(torch.__version__))
print("\tTorchvision: {}".format(torchvision.__version__))
print("\tCUDA: {}".format(torch.version.cuda))
print("\tCUDNN: {}".format(torch.backends.cudnn.version()))
print("\tNumPy: {}".format(np.__version__))
print("\tPIL: {}".format(PIL.__version__))
print('Args:')
for k, v in sorted(vars(args).items()):
print('\t{}: {}'.format(k, v))
if args.hparams_seed == 0:
hparams = hparams_registry.default_hparams(args.algorithm, args.dataset)
else:
hparams = hparams_registry.random_hparams(args.algorithm, args.dataset,
misc.seed_hash(args.hparams_seed, args.trial_seed))
if args.hparams:
hparams.update(json.loads(args.hparams))
print('HParams:')
for k, v in sorted(hparams.items()):
print('\t{}: {}'.format(k, v))
assert os.path.exists(os.path.join(args.output_dir, 'done'))
assert os.path.exists(os.path.join(args.output_dir, 'IID_best.pkl')) # IID_best is produced by train.py
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
if args.dataset in vars(datasets):
dataset = vars(datasets)[args.dataset](args.data_dir,
args.test_envs, hparams)
else:
raise NotImplementedError
# Split each env into an 'in-split' and an 'out-split'. We'll train on
# each in-split except the test envs, and evaluate on all splits.
# To allow unsupervised domain adaptation experiments, we split each test
# env into 'in-split', 'uda-split' and 'out-split'. The 'in-split' is used
# by collect_results.py to compute classification accuracies. The
# 'out-split' is used by the Oracle model selectino method. The unlabeled
# samples in 'uda-split' are passed to the algorithm at training time if
# args.task == "domain_adaptation". If we are interested in comparing
# domain generalization and domain adaptation results, then domain
# generalization algorithms should create the same 'uda-splits', which will
# be discared at training.
in_splits = []
out_splits = []
uda_splits = []
for env_i, env in enumerate(dataset):
uda = []
out, in_ = misc.split_dataset(env,
int(len(env)*args.holdout_fraction),
misc.seed_hash(args.trial_seed, env_i))
if env_i in args.test_envs:
uda, in_ = misc.split_dataset(in_,
int(len(in_)*args.uda_holdout_fraction),
misc.seed_hash(args.trial_seed, env_i))
if hparams['class_balanced']:
in_weights = misc.make_weights_for_balanced_classes(in_)
out_weights = misc.make_weights_for_balanced_classes(out)
if uda is not None:
uda_weights = misc.make_weights_for_balanced_classes(uda)
else:
in_weights, out_weights, uda_weights = None, None, None
in_splits.append((in_, in_weights))
out_splits.append((out, out_weights))
if len(uda):
uda_splits.append((uda, uda_weights))
# Use out splits as training data (to fair comparison with train.py)
train_loaders = [FastDataLoader(
dataset=env,
batch_size=hparams['batch_size'],
num_workers=dataset.N_WORKERS)
for i, (env, env_weights) in enumerate(out_splits)
if i in args.test_envs]
uda_loaders = [InfiniteDataLoader(
dataset=env,
weights=env_weights,
batch_size=hparams['batch_size'],
num_workers=dataset.N_WORKERS)
for i, (env, env_weights) in enumerate(uda_splits)
if i in args.test_envs]
eval_loaders = [FastDataLoader(
dataset=env,
batch_size=args.test_batch_size,
num_workers=dataset.N_WORKERS)
for env, _ in (in_splits + out_splits + uda_splits)]
eval_weights = [None for _, weights in (in_splits + out_splits + uda_splits)]
eval_loader_names = ['env{}_in'.format(i)
for i in range(len(in_splits))]
eval_loader_names += ['env{}_out'.format(i)
for i in range(len(out_splits))]
eval_loader_names += ['env{}_uda'.format(i)
for i in range(len(uda_splits))]
algorithm_class = algorithms.get_algorithm_class(args.algorithm)
algorithm = algorithm_class(dataset.input_shape, dataset.num_classes,
len(dataset) - len(args.test_envs), hparams)
if algorithm_dict is not None:
algorithm.load_state_dict(algorithm_dict)
algorithm.to(device)
if hasattr(algorithm, 'network'): | algorithm.network = DataParallelPassthrough(algorithm.network) | 7 | 2023-10-15 14:26:12+00:00 | 12k |
jianlanluo/SAQ | vqn/brac_main.py | [
{
"identifier": "SAC",
"path": "vqn/brac.py",
"snippet": "class SAC(object):\n\n @staticmethod\n def get_default_config(updates=None):\n config = ConfigDict()\n config.discount = 0.99\n config.alpha_multiplier = 1.0\n config.use_automatic_entropy_tuning = False\n ... | import os
import time
import uuid
import numpy as np
import gym
import jax
import jax.numpy as jnp
import flax
import absl.app
import absl.flags
from copy import deepcopy
from pprint import pprint
from pprint import pprint
from .brac import SAC
from .replay_buffer import ReplayBuffer, get_d4rl_dataset, subsample_batch
from .replay_buffer import ReplayBuffer, get_d4rl_dataset, subsample_batch
from .jax_utils import batch_to_jax
from .model import TanhGaussianPolicy, FullyConnectedQFunction, SamplerPolicy
from .sampler import StepSampler, TrajSampler
from .utils import (
Timer, define_flags_with_default, set_random_seed, print_flags,
get_user_flags, prefix_metrics, WandBLogger
)
from viskit.logging import logger, setup_logger | 8,624 |
FLAGS_DEF = define_flags_with_default(
env='HalfCheetah-v2',
max_traj_length=1000,
replay_buffer_size=1000000,
seed=42,
save_model=False,
reward_scale=1.0,
reward_bias=0.0,
clip_action=0.999,
policy_arch='256-256',
qf_arch='256-256',
orthogonal_init=False,
policy_log_std_multiplier=1.0,
policy_log_std_offset=-1.0,
n_epochs=2000,
n_pi_beta_epochs=5000,
n_train_step_per_epoch=1000,
eval_period=10,
eval_n_trajs=5,
batch_size=256,
sac=SAC.get_default_config(),
logging=WandBLogger.get_default_config(),
)
def main(argv):
FLAGS = absl.flags.FLAGS
variant = get_user_flags(FLAGS, FLAGS_DEF)
wandb_logger = WandBLogger(config=FLAGS.logging, variant=variant)
setup_logger(
variant=variant,
exp_id=wandb_logger.experiment_id,
seed=FLAGS.seed,
base_log_dir=FLAGS.logging.output_dir,
include_exp_prefix_sub_dir=False
)
set_random_seed(FLAGS.seed)
eval_sampler = TrajSampler(gym.make(FLAGS.env).unwrapped, FLAGS.max_traj_length)
dataset = get_d4rl_dataset(eval_sampler.env)
dataset['rewards'] = dataset['rewards'] * FLAGS.reward_scale + FLAGS.reward_bias
dataset['actions'] = np.clip(dataset['actions'], -FLAGS.clip_action, FLAGS.clip_action)
dataset = get_d4rl_dataset(eval_sampler.env)
dataset['rewards'] = dataset['rewards'] * FLAGS.reward_scale + FLAGS.reward_bias
dataset['actions'] = np.clip(dataset['actions'], -FLAGS.clip_action, FLAGS.clip_action)
observation_dim = eval_sampler.env.observation_space.shape[0]
action_dim = eval_sampler.env.action_space.shape[0]
policy = TanhGaussianPolicy(
observation_dim, action_dim, FLAGS.policy_arch, FLAGS.orthogonal_init,
FLAGS.policy_log_std_multiplier, FLAGS.policy_log_std_offset, use_tanh=True
)
behavior_policy = TanhGaussianPolicy(
observation_dim, action_dim, FLAGS.policy_arch, FLAGS.orthogonal_init,
FLAGS.policy_log_std_multiplier, FLAGS.policy_log_std_offset, use_tanh=False
)
qf = FullyConnectedQFunction(observation_dim, action_dim, FLAGS.qf_arch, FLAGS.orthogonal_init)
if FLAGS.sac.target_entropy >= 0.0:
FLAGS.sac.target_entropy = -np.prod(eval_sampler.env.action_space.shape).item()
sac = SAC(FLAGS.sac, behavior_policy, policy, qf)
sampler_policy = SamplerPolicy(sac.policy, sac.train_params['policy'])
viskit_metrics = {}
for pi_beta_epoch in range(FLAGS.n_pi_beta_epochs):
metrics = {'behavior_policy_epoch': pi_beta_epoch}
for batch_idx in range(FLAGS.n_train_step_per_epoch):
batch = batch_to_jax(subsample_batch(dataset, FLAGS.batch_size))
|
FLAGS_DEF = define_flags_with_default(
env='HalfCheetah-v2',
max_traj_length=1000,
replay_buffer_size=1000000,
seed=42,
save_model=False,
reward_scale=1.0,
reward_bias=0.0,
clip_action=0.999,
policy_arch='256-256',
qf_arch='256-256',
orthogonal_init=False,
policy_log_std_multiplier=1.0,
policy_log_std_offset=-1.0,
n_epochs=2000,
n_pi_beta_epochs=5000,
n_train_step_per_epoch=1000,
eval_period=10,
eval_n_trajs=5,
batch_size=256,
sac=SAC.get_default_config(),
logging=WandBLogger.get_default_config(),
)
def main(argv):
FLAGS = absl.flags.FLAGS
variant = get_user_flags(FLAGS, FLAGS_DEF)
wandb_logger = WandBLogger(config=FLAGS.logging, variant=variant)
setup_logger(
variant=variant,
exp_id=wandb_logger.experiment_id,
seed=FLAGS.seed,
base_log_dir=FLAGS.logging.output_dir,
include_exp_prefix_sub_dir=False
)
set_random_seed(FLAGS.seed)
eval_sampler = TrajSampler(gym.make(FLAGS.env).unwrapped, FLAGS.max_traj_length)
dataset = get_d4rl_dataset(eval_sampler.env)
dataset['rewards'] = dataset['rewards'] * FLAGS.reward_scale + FLAGS.reward_bias
dataset['actions'] = np.clip(dataset['actions'], -FLAGS.clip_action, FLAGS.clip_action)
dataset = get_d4rl_dataset(eval_sampler.env)
dataset['rewards'] = dataset['rewards'] * FLAGS.reward_scale + FLAGS.reward_bias
dataset['actions'] = np.clip(dataset['actions'], -FLAGS.clip_action, FLAGS.clip_action)
observation_dim = eval_sampler.env.observation_space.shape[0]
action_dim = eval_sampler.env.action_space.shape[0]
policy = TanhGaussianPolicy(
observation_dim, action_dim, FLAGS.policy_arch, FLAGS.orthogonal_init,
FLAGS.policy_log_std_multiplier, FLAGS.policy_log_std_offset, use_tanh=True
)
behavior_policy = TanhGaussianPolicy(
observation_dim, action_dim, FLAGS.policy_arch, FLAGS.orthogonal_init,
FLAGS.policy_log_std_multiplier, FLAGS.policy_log_std_offset, use_tanh=False
)
qf = FullyConnectedQFunction(observation_dim, action_dim, FLAGS.qf_arch, FLAGS.orthogonal_init)
if FLAGS.sac.target_entropy >= 0.0:
FLAGS.sac.target_entropy = -np.prod(eval_sampler.env.action_space.shape).item()
sac = SAC(FLAGS.sac, behavior_policy, policy, qf)
sampler_policy = SamplerPolicy(sac.policy, sac.train_params['policy'])
viskit_metrics = {}
for pi_beta_epoch in range(FLAGS.n_pi_beta_epochs):
metrics = {'behavior_policy_epoch': pi_beta_epoch}
for batch_idx in range(FLAGS.n_train_step_per_epoch):
batch = batch_to_jax(subsample_batch(dataset, FLAGS.batch_size)) | metrics.update(prefix_metrics(sac.train_behavior_policy(batch), 'behavior_policy')) | 18 | 2023-10-18 06:31:20+00:00 | 12k |
naver-ai/dual-teacher | tools/train.py | [
{
"identifier": "__version__",
"path": "mmseg/version.py",
"snippet": "def parse_version_info(version_str):"
},
{
"identifier": "set_random_seed",
"path": "mmseg/apis/train.py",
"snippet": "def set_random_seed(seed, deterministic=False):\n \"\"\"Set random seed.\n Args:\n se... | import argparse
import copy
import os
import os.path as osp
import time
import logging
import mmcv
import torch
import numpy as np
import seg_core.eval_seg as eval_seg
import torch.nn.functional as F
import warnings
import torch.distributed as dist
import random
import tempfile
from mmcv.runner import init_dist
from mmcv.utils import Config, DictAction, get_git_hash
from torchvision.transforms import ToTensor
from mmseg import __version__
from mmseg.apis import set_random_seed, train_segmentor
from mmseg.datasets import build_dataset, build_dataloader
from mmseg.models import build_segmentor
from mmseg.utils import collect_env, get_root_logger
from seg_core.model import MiT_SegFormer
from seg_core.optimizer import PolyWarmupAdamW
from seg_core.augmentations import ClassMixLoss, compute_classmix, compute_cutmix, compute_ic
from torchvision.utils import save_image
from dist_helper import setup_distributed
from mmseg.apis import single_gpu_test
from mmcv.image import tensor2imgs
from PIL import Image, ImageOps, ImageFilter
from torchvision import transforms
from copy import deepcopy | 8,161 |
def setup_logger(filename='test.log'):
## setup logger
# logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(filename)s - %(levelname)s: %(message)s')
logFormatter = logging.Formatter('%(asctime)s - %(filename)s - %(levelname)s: %(message)s')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
fHandler = logging.FileHandler(filename, mode='w')
fHandler.setFormatter(logFormatter)
logger.addHandler(fHandler)
cHandler = logging.StreamHandler()
cHandler.setFormatter(logFormatter)
logger.addHandler(cHandler)
def parse_args():
parser = argparse.ArgumentParser(description='Train a segmentor')
parser.add_argument('--ddp', default=False, action='store_true')
parser.add_argument('--dual_teacher', default=False, action='store_true')
parser.add_argument('--unimatch_aug', default=False, action='store_true')
parser.add_argument('--save_path', type=str, help='log moemo')
parser.add_argument('--out', default='work_dirs/res.pkl', help='output result file in pickle format')
parser.add_argument('--config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument('--load-from', help='the checkpoint file to load weights from')
parser.add_argument('--resume-from', help='the checkpoint file to resume from')
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument('--gpus', type=int, help='number of gpus to use (only applicable to non-distributed training)')
group_gpus.add_argument('--gpu-ids', type=int, nargs='+', help='ids of gpus to use only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument('--deterministic', action='store_true', help='whether to set deterministic options for CUDNN backend.')
parser.add_argument('--options', nargs='+', action=DictAction, help='custom options')
parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher')
parser.add_argument("--backbone", type=str)
parser.add_argument("--port", default=None, type=int)
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument('--dc', default=False, action='store_true')
args = parser.parse_args()
# if 'LOCAL_RANK' not in os.environ:
# os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def np2tmp(array, temp_file_name=None):
"""Save ndarray to local numpy file.
Args:
array (ndarray): Ndarray to save.
temp_file_name (str): Numpy file name. If 'temp_file_name=None', this
function will generate a file name with tempfile.NamedTemporaryFile
to save ndarray. Default: None.
Returns:
str: The numpy file name.
"""
if temp_file_name is None:
temp_file_name = tempfile.NamedTemporaryFile(
suffix='.npy', delete=False).name
np.save(temp_file_name, array)
return temp_file_name
def image_saver(input, name):
"""
:param name: "path/name"
"""
if input.dim() == 3:
input = input.unsqueeze(dim=0)
save_image(input.float(), str(name) + '.jpg')
def main():
setup_logger()
args = parse_args()
mit_type = args.backbone[-1]
if mit_type == '5':
args.config = 'local_configs/segformer/B' + mit_type + '/segformer.b' + mit_type + '.640x640.ade.160k.py'
else:
args.config = 'local_configs/segformer/B' + mit_type + '/segformer.b' + mit_type + '.512x512.ade.160k.py'
cfg = Config.fromfile(args.config)
if args.options is not None:
cfg.merge_from_dict(args.options)
torch.backends.cudnn.benchmark = False
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs', osp.splitext(osp.basename(args.config))[0])
if args.load_from is not None:
cfg.load_from = args.load_from
if args.resume_from is not None:
cfg.resume_from = args.resume_from
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
distributed = False
if args.ddp:
rank, word_size = setup_distributed(port=args.port)
distributed = True
else:
rank = 0
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
| """
Dual-Teacher
Copyright (c) 2023-present NAVER Cloud Corp.
distributed under NVIDIA Source Code License for SegFormer
--------------------------------------------------------
References:
SegFormer: https://github.com/NVlabs/SegFormer
--------------------------------------------------------
"""
warnings.filterwarnings("ignore")
criterion_u = torch.nn.CrossEntropyLoss(reduction='none').cuda()
def train_sup(args, model, optimizer, train_loader, val_loader, criterion, max_iters, print_iters, eval_iters):
train_iterator = iter(train_loader)
if args.ddp:
rank, world_size = dist.get_rank(), dist.get_world_size()
else:
rank = 0
for epoch in range(200):
for i in range(len(train_loader)):
model.train()
try:
batch_data = next(train_iterator)
except:
train_iterator = iter(train_loader)
batch_data = next(train_iterator)
image = batch_data['img'].data[0].cuda(non_blocking=True)
label = batch_data['gt_semantic_seg'].data[0].squeeze(dim=1).cuda(non_blocking=True)
outputs = model(image)
outputs = F.interpolate(outputs, size=label.shape[1:], mode='bilinear', align_corners=False)
seg_loss = criterion(outputs, label.type(torch.long))
optimizer.zero_grad()
seg_loss.backward()
optimizer.step()
if rank == 0:
lr = optimizer.param_groups[0]['lr']
logging.info("save_path:{}".format(args.save_path))
logging.info("Iter: %d; LR: %.3e; seg_loss: %f" % (i + 1, lr, seg_loss.item()))
print("Iter: %d; LR: %.3e; seg_loss: %f" % (i + 1, lr, seg_loss.item()))
logging.info('[iter:{}] Validation:'.format(i + 1))
print('[iter:{}] Validation:'.format(i + 1))
val_score = val(model.module, val_loader)
logging.info('mIoU:{:.5f}'.format(val_score['Mean IoU'] * 100))
print('mIoU:{:.5f}'.format(val_score['Mean IoU'] * 100))
model.train()
def train_dual(args, model, model_teacher, model_teacher2, optimizer, train_loader, train_loader_u, val_loader, criterion, cm_loss_fn, max_iters, print_iters, eval_iters):
if args.ddp:
rank, world_size = dist.get_rank(), dist.get_world_size()
else:
rank = 0
best_miou, best_epoch = 0, 0
for epoch in range(200):
model.train()
train_loader.sampler.set_epoch(epoch)
train_loader_u.sampler.set_epoch(epoch)
train_iterator = iter(train_loader)
train_iterator_u = iter(train_loader_u)
if epoch % 2 == 0:
ema_model = model_teacher
do_cut_mix = True
do_class_mix = False
else:
ema_model = model_teacher2
do_cut_mix = False
do_class_mix = True
ema_model.train()
for i in range(len(train_loader)):
try:
batch_data_u = next(train_iterator_u)
except:
train_iterator_u = iter(train_loader_u)
batch_data_u = next(train_iterator_u)
try:
batch_data = next(train_iterator)
except:
train_iterator = iter(train_loader)
batch_data = next(train_iterator)
image = batch_data['img'].data[0].cuda(non_blocking=True)
label = batch_data['gt_semantic_seg'].data[0].squeeze(dim=1).cuda(non_blocking=True)
image_u = batch_data_u['img'].data[0].cuda(non_blocking=True)
label_u = batch_data['gt_semantic_seg'].data[0].squeeze(dim=1).cuda(non_blocking=True)
b, _, h, w = image.shape
image_u_strong = deepcopy(image_u)
image_u_strong = transforms.ColorJitter(0.5, 0.5, 0.5, 0.25)(image_u_strong)
image_u_strong = transforms.RandomGrayscale(p=0.2)(image_u_strong)
if do_class_mix:
loss = compute_classmix(b, h, w, criterion, cm_loss_fn, model, ema_model, image, label, image_u, image_u_strong, threshold=0.95)
if do_cut_mix:
loss = compute_cutmix(h, w, image, label, criterion, model, ema_model, image_u, threshold=0.95)
loss_dc = compute_ic(model, ema_model, image_u, image_u_strong, criterion_u, label_u, h, w, threshold=0.95)
total_loss = loss + loss_dc * 0.2
optimizer.zero_grad()
total_loss.backward()
optimizer.step()
if args.ddp:
reduced_loss = loss.clone().detach()
dist.all_reduce(reduced_loss)
update_ema(model_teacher=ema_model, model=model, alpha_teacher=0.99, iteration=i)
if rank == 0:
if (i + 1) % print_iters == 0:
lr = optimizer.param_groups[0]['lr']
logging.info("Epoch: %d; Iter: %d; LR: %.3e; loss: %f" % (epoch, i + 1, lr, loss.item()))
print("Epoch: %d; Iter: %d; LR: %.3e; loss: %f" % (epoch, i + 1, lr, loss.item()))
if rank == 0:
logging.info('[Epoch {}] [iter:{}] Validation:'.format(epoch, i + 1))
print('[Epoch {}] [iter:{}] Validation:'.format(epoch, i + 1))
val_score = val(model.module, val_loader)
miou = val_score['Mean IoU'] * 100
if miou > best_miou:
best_miou = miou
best_epoch = epoch
logging.info('mIoU:{:.5f} Best mIOU:{:.5f} on epoch {}'.format(miou, best_miou, best_epoch))
print('mIoU:{:.5f} Best mIOU:{:.5f} on epoch {}'.format(miou, best_miou, best_epoch))
model.train()
def synchronize():
if not dist.is_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier()
def val(model, data_loader):
model.eval()
preds, gts = [], []
for i, data in enumerate(data_loader):
with torch.no_grad():
image = data['img'][0].cuda(non_blocking=True)
label = data['gt_semantic_seg'][0].cuda(non_blocking=True)
outputs = model(image)
resized_outputs = F.interpolate(outputs, size=label.shape[1:], mode='bilinear', align_corners=False)
preds += list(torch.argmax(resized_outputs, dim=1).cpu().numpy().astype(np.int16))
gts += list(label.cpu().numpy().astype(np.int16))
score = eval_seg.scores(gts, preds, num_classes=150)
model.train()
return score
def val_ddp(args, epoch, model, data_loader):
model.eval()
preds, gts = [], []
if args.ddp:
data_loader.sampler.set_epoch(epoch)
rank, world_size = dist.get_rank(), dist.get_world_size()
else:
rank = 0
for i, data in enumerate(data_loader):
with torch.no_grad():
# print(data)
image = data['img'][0].cuda(non_blocking=True)
label = data['gt_semantic_seg'][0].cuda(non_blocking=True)
outputs = model(image)
resized_outputs = F.interpolate(outputs, size=label.shape[1:], mode='bilinear', align_corners=False)
preds += list(torch.argmax(resized_outputs, dim=1).cpu().numpy().astype(np.int16))
gts += list(label.cpu().numpy().astype(np.int16))
if args.ddp:
preds = torch.from_numpy(np.array(preds)).cuda()
gts = torch.from_numpy(np.array(gts)).cuda()
dist.all_reduce(preds)
dist.all_reduce(gts)
gts = list(gts)
preds = list(preds)
score = eval_seg.scores(gts, preds, num_classes=150)
return score
def intersectionAndUnion(output, target, K, ignore_index):
# 'K' classes, output and target sizes are N or N * L or N * H * W, each value in range 0 to K - 1.
assert output.ndim in [1, 2, 3]
assert output.shape == target.shape
output = output.reshape(output.size).copy()
target = target.reshape(target.size)
output[np.where(target == ignore_index)[0]] = ignore_index
intersection = output[np.where(output == target)[0]]
area_intersection, _ = np.histogram(intersection, bins=np.arange(K + 1))
area_output, _ = np.histogram(output, bins=np.arange(K + 1))
area_target, _ = np.histogram(target, bins=np.arange(K + 1))
area_union = area_output + area_target - area_intersection
return area_intersection, area_union, area_target
def update_ema(model_teacher, model, alpha_teacher, iteration):
with torch.no_grad():
alpha_teacher = min(1 - 1 / (iteration + 1), alpha_teacher)
for ema_param, param in zip(model_teacher.parameters(), model.parameters()):
ema_param.data[:] = alpha_teacher * ema_param[:].data[:] + (1 - alpha_teacher) * param[:].data[:]
def setup_logger(filename='test.log'):
## setup logger
# logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(filename)s - %(levelname)s: %(message)s')
logFormatter = logging.Formatter('%(asctime)s - %(filename)s - %(levelname)s: %(message)s')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
fHandler = logging.FileHandler(filename, mode='w')
fHandler.setFormatter(logFormatter)
logger.addHandler(fHandler)
cHandler = logging.StreamHandler()
cHandler.setFormatter(logFormatter)
logger.addHandler(cHandler)
def parse_args():
parser = argparse.ArgumentParser(description='Train a segmentor')
parser.add_argument('--ddp', default=False, action='store_true')
parser.add_argument('--dual_teacher', default=False, action='store_true')
parser.add_argument('--unimatch_aug', default=False, action='store_true')
parser.add_argument('--save_path', type=str, help='log moemo')
parser.add_argument('--out', default='work_dirs/res.pkl', help='output result file in pickle format')
parser.add_argument('--config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument('--load-from', help='the checkpoint file to load weights from')
parser.add_argument('--resume-from', help='the checkpoint file to resume from')
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument('--gpus', type=int, help='number of gpus to use (only applicable to non-distributed training)')
group_gpus.add_argument('--gpu-ids', type=int, nargs='+', help='ids of gpus to use only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument('--deterministic', action='store_true', help='whether to set deterministic options for CUDNN backend.')
parser.add_argument('--options', nargs='+', action=DictAction, help='custom options')
parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], default='none', help='job launcher')
parser.add_argument("--backbone", type=str)
parser.add_argument("--port", default=None, type=int)
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument('--dc', default=False, action='store_true')
args = parser.parse_args()
# if 'LOCAL_RANK' not in os.environ:
# os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def np2tmp(array, temp_file_name=None):
"""Save ndarray to local numpy file.
Args:
array (ndarray): Ndarray to save.
temp_file_name (str): Numpy file name. If 'temp_file_name=None', this
function will generate a file name with tempfile.NamedTemporaryFile
to save ndarray. Default: None.
Returns:
str: The numpy file name.
"""
if temp_file_name is None:
temp_file_name = tempfile.NamedTemporaryFile(
suffix='.npy', delete=False).name
np.save(temp_file_name, array)
return temp_file_name
def image_saver(input, name):
"""
:param name: "path/name"
"""
if input.dim() == 3:
input = input.unsqueeze(dim=0)
save_image(input.float(), str(name) + '.jpg')
def main():
setup_logger()
args = parse_args()
mit_type = args.backbone[-1]
if mit_type == '5':
args.config = 'local_configs/segformer/B' + mit_type + '/segformer.b' + mit_type + '.640x640.ade.160k.py'
else:
args.config = 'local_configs/segformer/B' + mit_type + '/segformer.b' + mit_type + '.512x512.ade.160k.py'
cfg = Config.fromfile(args.config)
if args.options is not None:
cfg.merge_from_dict(args.options)
torch.backends.cudnn.benchmark = False
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs', osp.splitext(osp.basename(args.config))[0])
if args.load_from is not None:
cfg.load_from = args.load_from
if args.resume_from is not None:
cfg.resume_from = args.resume_from
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
distributed = False
if args.ddp:
rank, word_size = setup_distributed(port=args.port)
distributed = True
else:
rank = 0
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log') | logger = get_root_logger(log_file=log_file, log_level=cfg.log_level) | 7 | 2023-10-19 04:04:31+00:00 | 12k |
SLDGroup/G-CASCADE | lib/maxxvit_4out.py | [
{
"identifier": "build_model_with_cfg",
"path": "lib/models_timm/helpers.py",
"snippet": "def build_model_with_cfg(\n model_cls: Callable,\n variant: str,\n pretrained: bool,\n pretrained_cfg: Optional[Dict] = None,\n model_cfg: Optional[Any] = None,\n feature_c... | import math
import torch
from collections import OrderedDict
from dataclasses import dataclass, replace
from functools import partial
from typing import Callable, Optional, Union, Tuple, List
from torch import nn
from torch.utils.checkpoint import checkpoint
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from lib.models_timm.helpers import build_model_with_cfg, checkpoint_seq, named_apply
from lib.models_timm.fx_features import register_notrace_function
from lib.models_timm.layers import Mlp, ConvMlp, DropPath, ClassifierHead, trunc_normal_tf_, LayerNorm2d, LayerNorm
from lib.models_timm.layers import create_attn, get_act_layer, get_norm_layer, get_norm_act_layer, create_conv2d
from lib.models_timm.layers import to_2tuple, extend_tuple, make_divisible, _assert
from lib.models_timm.registry import register_model
from lib.models_timm.vision_transformer_relpos import RelPosMlp, RelPosBias # FIXME move these to common location | 9,272 |
def cfg_window_size(cfg: MaxxVitTransformerCfg, img_size: Tuple[int, int]):
if cfg.window_size is not None:
assert cfg.grid_size
return cfg
partition_size = img_size[0] // cfg.partition_ratio, img_size[1] // cfg.partition_ratio
cfg = replace(cfg, window_size=partition_size, grid_size=partition_size)
return cfg
class MaxxVit(nn.Module):
""" CoaTNet + MaxVit base model.
Highly configurable for different block compositions, tensor layouts, pooling types.
"""
    def __init__(
            self,
            cfg: MaxxVitCfg,
            img_size: Union[int, Tuple[int, int]] = 224,
            in_chans: int = 3,
            num_classes: int = 1000,
            global_pool: str = 'avg',
            drop_rate: float = 0.,
            drop_path_rate: float = 0.
    ):
        """Build the MaxxVit / CoAtNet backbone described by ``cfg``.

        Args:
            cfg: Architecture definition (stage widths/depths, block types,
                conv and transformer sub-configs, weight init scheme).
            img_size: Input resolution; used to derive the attention
                window/grid partition sizes and per-stage feature sizes.
            in_chans: Number of input image channels.
            num_classes: Recorded on the module; the classifier head itself is
                commented out in this feature-extraction variant.
            global_pool: Pooling type recorded for (disabled) head creation.
            drop_rate: Head dropout rate (recorded; head is disabled here).
            drop_path_rate: Maximum stochastic-depth rate, distributed
                linearly over all blocks across all stages.
        """
        super().__init__()
        img_size = to_2tuple(img_size)
        # Resolve attention window/grid sizes from the input resolution.
        transformer_cfg = cfg_window_size(cfg.transformer_cfg, img_size)
        self.num_classes = num_classes
        self.global_pool = global_pool
        self.num_features = cfg.embed_dim[-1]
        self.embed_dim = cfg.embed_dim
        self.drop_rate = drop_rate
        self.grad_checkpointing = False
        self.stem = Stem(
            in_chs=in_chans,
            out_chs=cfg.stem_width,
            act_layer=cfg.conv_cfg.act_layer,
            norm_layer=cfg.conv_cfg.norm_layer,
            norm_eps=cfg.conv_cfg.norm_eps,
        )
        stride = self.stem.stride
        # Feature map size after the stem.
        feat_size = tuple([i // s for i, s in zip(img_size, to_2tuple(stride))])
        num_stages = len(cfg.embed_dim)
        assert len(cfg.depths) == num_stages
        # Linearly increasing stochastic depth rates, split into per-stage lists.
        dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg.depths)).split(cfg.depths)]
        in_chs = self.stem.out_chs
        #final_norm_layer = get_norm_layer(cfg.transformer_cfg.norm_layer)
        stages = []
        #norms = []
        for i in range(num_stages):
            stage_stride = 2  # every stage halves the spatial resolution
            out_chs = cfg.embed_dim[i]
            # Ceil-divide the feature size by the stage stride.
            feat_size = tuple([(r - 1) // stage_stride + 1 for r in feat_size])
            stages += [MaxxVitStage(
                in_chs,
                out_chs,
                depth=cfg.depths[i],
                block_types=cfg.block_type[i],
                conv_cfg=cfg.conv_cfg,
                transformer_cfg=transformer_cfg,
                feat_size=feat_size,
                drop_path=dpr[i],
            )]
            #norms.append(final_norm_layer(out_chs, eps=cfg.transformer_cfg.norm_eps))
            stride *= stage_stride
            in_chs = out_chs
        self.stages = nn.Sequential(*stages)
        #self.norms = nn.Sequential(*norms)
        final_norm_layer = get_norm_layer(cfg.transformer_cfg.norm_layer)
        self.norm = final_norm_layer(self.num_features, eps=cfg.transformer_cfg.norm_eps)

        # Classifier head (disabled in this multi-output/backbone variant).
        #self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate)

        # Weight init (default PyTorch init works well for AdamW if scheme not set)
        assert cfg.weight_init in ('', 'normal', 'trunc_normal', 'xavier_normal', 'vit_eff')
        if cfg.weight_init:
            named_apply(partial(self._init_weights, scheme=cfg.weight_init), self)
def _init_weights(self, module, name, scheme=''):
if hasattr(module, 'init_weights'):
try:
module.init_weights(scheme=scheme)
except TypeError:
module.init_weights()
@torch.jit.ignore
def no_weight_decay(self):
return {
k for k, _ in self.named_parameters()
if any(n in k for n in ["relative_position_bias_table", "rel_pos.mlp"])}
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^stem', # stem and embed
blocks=[(r'^stages\.(\d+)', None), (r'^norm', (99999,))]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
if global_pool is None:
global_pool = self.head.global_pool.pool_type
| """ MaxVit and CoAtNet Vision Transformer - CNN Hybrids in PyTorch
This is a from-scratch implementation of both CoAtNet and MaxVit in PyTorch.
99% of the implementation was done from papers, however last minute some adjustments were made
based on the (as yet unfinished?) public code release https://github.com/google-research/maxvit
There are multiple sets of models defined for both architectures. Typically, names with a
`_rw` suffix are my own original configs prior to referencing https://github.com/google-research/maxvit.
These configs work well and appear to be a bit faster / lower resource than the paper.
The models without extra prefix / suffix' (coatnet_0_224, maxvit_tiny_224, etc), are intended to
match paper, BUT, without any official pretrained weights it's difficult to confirm a 100% match.
# FIXME / WARNING
This impl remains a WIP, some configs and models may vanish or change...
Papers:
MaxViT: Multi-Axis Vision Transformer - https://arxiv.org/abs/2204.01697
@article{tu2022maxvit,
title={MaxViT: Multi-Axis Vision Transformer},
author={Tu, Zhengzhong and Talebi, Hossein and Zhang, Han and Yang, Feng and Milanfar, Peyman and Bovik, Alan and Li, Yinxiao},
journal={ECCV},
year={2022},
}
CoAtNet: Marrying Convolution and Attention for All Data Sizes - https://arxiv.org/abs/2106.04803
@article{DBLP:journals/corr/abs-2106-04803,
author = {Zihang Dai and Hanxiao Liu and Quoc V. Le and Mingxing Tan},
title = {CoAtNet: Marrying Convolution and Attention for All Data Sizes},
journal = {CoRR},
volume = {abs/2106.04803},
year = {2021}
}
Hacked together by / Copyright 2022, Ross Wightman
"""
__all__ = ['MaxxVitCfg', 'MaxxVitConvCfg', 'MaxxVitTransformerCfg', 'MaxxVit']
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.95, 'interpolation': 'bicubic',
'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5),
'first_conv': 'stem.conv1', 'classifier': 'head.fc',
'fixed_input_size': True,
**kwargs
}
# Pretrained checkpoint / preprocessing metadata keyed by model variant name.
# An empty ``url`` means no pretrained weights have been released for that cfg.
default_cfgs = {
    # Fiddling with configs / defaults / still pretraining
    'coatnet_pico_rw_224': _cfg(url=''),
    'coatnet_nano_rw_224': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_nano_rw_224_sw-f53093b4.pth',
        crop_pct=0.9),
    'coatnet_0_rw_224': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_0_rw_224_sw-a6439706.pth'),
    'coatnet_1_rw_224': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_1_rw_224_sw-5cae1ea8.pth'
    ),
    'coatnet_2_rw_224': _cfg(url=''),
    'coatnet_3_rw_224': _cfg(url=''),

    # Highly experimental configs
    'coatnet_bn_0_rw_224': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_bn_0_rw_224_sw-c228e218.pth',
        mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD,
        crop_pct=0.95),
    'coatnet_rmlp_nano_rw_224': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_rmlp_nano_rw_224_sw-bd1d51b3.pth',
        crop_pct=0.9),
    'coatnet_rmlp_0_rw_224': _cfg(url=''),
    'coatnet_rmlp_1_rw_224': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_rmlp_1_rw_224_sw-9051e6c3.pth'),
    'coatnet_rmlp_2_rw_224': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnet_rmlp_2_rw_224_sw-5ccfac55.pth'),
    'coatnet_rmlp_3_rw_224': _cfg(url=''),
    'coatnet_nano_cc_224': _cfg(url=''),
    'coatnext_nano_rw_224': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/coatnext_nano_rw_224_ad-22cb71c2.pth',
        crop_pct=0.9),

    # Trying to be like the CoAtNet paper configs
    'coatnet_0_224': _cfg(url=''),
    'coatnet_1_224': _cfg(url=''),
    'coatnet_2_224': _cfg(url=''),
    'coatnet_3_224': _cfg(url=''),
    'coatnet_4_224': _cfg(url=''),
    'coatnet_5_224': _cfg(url=''),

    # Experimental configs (256x256 input models use an 8x8 pool size)
    'maxvit_pico_rw_256': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8)),
    'maxvit_nano_rw_256': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_nano_rw_256_sw-fb127241.pth',
        input_size=(3, 256, 256), pool_size=(8, 8)),
    'maxvit_tiny_rw_224': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_tiny_rw_224_sw-7d0dffeb.pth'),
    'maxvit_tiny_rw_256': _cfg(
        url='',
        input_size=(3, 256, 256), pool_size=(8, 8)),
    'maxvit_rmlp_pico_rw_256': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_pico_rw_256_sw-8d82f2c6.pth',
        input_size=(3, 256, 256), pool_size=(8, 8)),
    'maxvit_rmlp_nano_rw_256': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_nano_rw_256_sw-c17bb0d6.pth',
        input_size=(3, 256, 256), pool_size=(8, 8)),
    'maxvit_rmlp_tiny_rw_256': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_tiny_rw_256_sw-bbef0ff5.pth',
        input_size=(3, 256, 256), pool_size=(8, 8)),
    'maxvit_rmlp_small_rw_224': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxvit_rmlp_small_rw_224_sw-6ef0ae4f.pth',
        crop_pct=0.9,
    ),
    'maxvit_rmlp_small_rw_256': _cfg(
        url='',
        input_size=(3, 256, 256), pool_size=(8, 8)),
    'maxvit_tiny_pm_256': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8)),
    'maxxvit_rmlp_nano_rw_256': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxxvit_rmlp_nano_rw_256_sw-0325d459.pth',
        input_size=(3, 256, 256), pool_size=(8, 8)),
    'maxxvit_rmlp_tiny_rw_256': _cfg(url='', input_size=(3, 256, 256), pool_size=(8, 8)),
    'maxxvit_rmlp_small_rw_256': _cfg(
        url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-maxx/maxxvit_rmlp_small_rw_256_sw-37e217ff.pth',
        input_size=(3, 256, 256), pool_size=(8, 8)),

    # Trying to be like the MaxViT paper configs
    'maxvit_tiny_224': _cfg(url=''),
    'maxvit_small_224': _cfg(url=''),
    'maxvit_base_224': _cfg(url=''),
    'maxvit_large_224': _cfg(url=''),
    'maxvit_xlarge_224': _cfg(url=''),
}
@dataclass
class MaxxVitTransformerCfg:
    """Configuration for the transformer (attention) blocks."""
    dim_head: int = 32  # channels per attention head
    expand_ratio: float = 4.0  # MLP hidden dim ratio
    expand_first: bool = True  # run attention at output width (vs input width)
    shortcut_bias: bool = True
    attn_bias: bool = True
    attn_drop: float = 0.
    proj_drop: float = 0.
    pool_type: str = 'avg2'  # pooling used by downsampling shortcuts
    rel_pos_type: str = 'bias'
    rel_pos_dim: int = 512  # for relative position types w/ MLP
    partition_ratio: int = 32  # img_size // partition_ratio -> window/grid size
    window_size: Optional[Tuple[int, int]] = None  # resolved by cfg_window_size when None
    grid_size: Optional[Tuple[int, int]] = None
    init_values: Optional[float] = None  # LayerScale init value; None disables LayerScale
    act_layer: str = 'gelu'
    norm_layer: str = 'layernorm2d'
    norm_layer_cl: str = 'layernorm'  # norm for channels-last tensor layouts
    norm_eps: float = 1e-6

    def __post_init__(self):
        # Normalize sizes to 2-tuples; grid size defaults to the window size.
        if self.grid_size is not None:
            self.grid_size = to_2tuple(self.grid_size)
        if self.window_size is not None:
            self.window_size = to_2tuple(self.window_size)
            if self.grid_size is None:
                self.grid_size = self.window_size
@dataclass
class MaxxVitConvCfg:
    """Configuration for the convolutional (MBConv / ConvNeXt) blocks."""
    block_type: str = 'mbconv'  # 'mbconv' or 'convnext'
    expand_ratio: float = 4.0
    expand_output: bool = True  # calculate expansion channels from output (vs input chs)
    kernel_size: int = 3
    group_size: int = 1  # 1 == depthwise
    pre_norm_act: bool = False  # activation after pre-norm
    output_bias: bool = True  # bias for shortcut + final 1x1 projection conv
    stride_mode: str = 'dw'  # stride done via one of 'pool', '1x1', 'dw'
    pool_type: str = 'avg2'
    downsample_pool_type: str = 'avg2'
    attn_early: bool = False  # apply attn between conv2 and norm2, instead of after norm2
    attn_layer: str = 'se'
    attn_act_layer: str = 'silu'
    attn_ratio: float = 0.25
    init_values: Optional[float] = 1e-6  # for ConvNeXt block, ignored by MBConv
    act_layer: str = 'gelu'
    norm_layer: str = ''  # resolved per block_type in __post_init__ when empty
    norm_layer_cl: str = ''
    norm_eps: Optional[float] = None  # resolved per block_type in __post_init__ when None

    def __post_init__(self):
        # mbconv vs convnext blocks have different defaults, set in post_init to avoid explicit config args
        assert self.block_type in ('mbconv', 'convnext')
        use_mbconv = self.block_type == 'mbconv'
        if not self.norm_layer:
            self.norm_layer = 'batchnorm2d' if use_mbconv else 'layernorm2d'
        if not self.norm_layer_cl and not use_mbconv:
            self.norm_layer_cl = 'layernorm'
        if self.norm_eps is None:
            self.norm_eps = 1e-5 if use_mbconv else 1e-6
        self.downsample_pool_type = self.downsample_pool_type or self.pool_type
@dataclass
class MaxxVitCfg:
    """Top-level architecture config: per-stage widths/depths/block types plus
    the conv and transformer sub-configs.

    Block type codes ('C', 'T', 'M', 'PM') select the per-stage block
    implementation (resolved by the stage builder — presumably conv,
    transformer, MaxVit, parallel-MaxVit; confirm in ``MaxxVitStage``).
    """
    embed_dim: Tuple[int, ...] = (96, 192, 384, 768)
    depths: Tuple[int, ...] = (2, 3, 5, 2)
    block_type: Tuple[Union[str, Tuple[str, ...]], ...] = ('C', 'C', 'T', 'T')
    stem_width: Union[int, Tuple[int, int]] = 64
    stem_bias: bool = True
    # Use default_factory so each cfg gets its own sub-config instance: a
    # class-level shared instance would be mutated by every user, and mutable
    # (unhashable) dataclass defaults raise ValueError on Python 3.11+.
    conv_cfg: MaxxVitConvCfg = field(default_factory=MaxxVitConvCfg)
    transformer_cfg: MaxxVitTransformerCfg = field(default_factory=MaxxVitTransformerCfg)
    weight_init: str = 'vit_eff'
def _rw_coat_cfg(
        stride_mode='pool',
        pool_type='avg2',
        conv_output_bias=False,
        conv_attn_early=False,
        conv_attn_act_layer='relu',
        conv_norm_layer='',
        transformer_shortcut_bias=True,
        transformer_norm_layer='layernorm2d',
        transformer_norm_layer_cl='layernorm',
        init_values=None,
        rel_pos_type='bias',
        rel_pos_dim=512,
):
    """Conv + transformer sub-configs for the timm 'rw' CoAtNet variants.

    These variants were created before the official MaxViT code release and
    differ from the paper: the MBConv pre-norm includes an activation,
    expansion channels come from input (not output) chs, the MBConv shortcut
    and final 1x1 conv carry no bias, SE attention uses relu (not silu),
    MBConv uses silu (not gelu), and attention expansion happens via the
    output (not input) projection. Options that evolved over the initial
    training runs (pooling mode, SE placement, transformer shortcut bias)
    are exposed as arguments.
    """
    conv_cfg = MaxxVitConvCfg(
        stride_mode=stride_mode,
        pool_type=pool_type,
        pre_norm_act=True,
        expand_output=False,
        output_bias=conv_output_bias,
        attn_early=conv_attn_early,
        attn_act_layer=conv_attn_act_layer,
        act_layer='silu',
        norm_layer=conv_norm_layer,
    )
    transformer_cfg = MaxxVitTransformerCfg(
        expand_first=False,
        shortcut_bias=transformer_shortcut_bias,
        pool_type=pool_type,
        init_values=init_values,
        norm_layer=transformer_norm_layer,
        norm_layer_cl=transformer_norm_layer_cl,
        rel_pos_type=rel_pos_type,
        rel_pos_dim=rel_pos_dim,
    )
    return {'conv_cfg': conv_cfg, 'transformer_cfg': transformer_cfg}
def _rw_max_cfg(
        stride_mode='dw',
        pool_type='avg2',
        conv_output_bias=False,
        conv_attn_ratio=1 / 16,
        conv_norm_layer='',
        transformer_norm_layer='layernorm2d',
        transformer_norm_layer_cl='layernorm',
        window_size=None,
        dim_head=32,
        init_values=None,
        rel_pos_type='bias',
        rel_pos_dim=512,
):
    """Conv + transformer sub-configs for the timm 'rw' MaxVit variants.

    These predate the official MaxViT code release and differ from the
    paper: MBConv expansion is computed from input (not output) channels,
    the MBConv shortcut and final 1x1 conv carry no bias, MBConv uses silu
    (not gelu), and attention expansion happens via the output (not input)
    projection.
    """
    conv_cfg = MaxxVitConvCfg(
        stride_mode=stride_mode,
        pool_type=pool_type,
        expand_output=False,
        output_bias=conv_output_bias,
        attn_ratio=conv_attn_ratio,
        act_layer='silu',
        norm_layer=conv_norm_layer,
    )
    transformer_cfg = MaxxVitTransformerCfg(
        expand_first=False,
        pool_type=pool_type,
        dim_head=dim_head,
        window_size=window_size,
        init_values=init_values,
        norm_layer=transformer_norm_layer,
        norm_layer_cl=transformer_norm_layer_cl,
        rel_pos_type=rel_pos_type,
        rel_pos_dim=rel_pos_dim,
    )
    return {'conv_cfg': conv_cfg, 'transformer_cfg': transformer_cfg}
def _next_cfg(
        stride_mode='dw',
        pool_type='avg2',
        conv_norm_layer='layernorm2d',
        conv_norm_layer_cl='layernorm',
        transformer_norm_layer='layernorm2d',
        transformer_norm_layer_cl='layernorm',
        window_size=None,
        init_values=1e-6,
        rel_pos_type='mlp',  # MLP by default for maxxvit
        rel_pos_dim=512,
):
    """Sub-configs for experimental models using ConvNeXt conv blocks.

    ``init_values`` may be a scalar (shared by both branches) or a 2-tuple of
    per-branch LayerScale init values: (conv, transformer).
    """
    conv_init_values, transformer_init_values = to_2tuple(init_values)
    conv_cfg = MaxxVitConvCfg(
        block_type='convnext',
        stride_mode=stride_mode,
        pool_type=pool_type,
        expand_output=False,
        init_values=conv_init_values,
        norm_layer=conv_norm_layer,
        norm_layer_cl=conv_norm_layer_cl,
    )
    transformer_cfg = MaxxVitTransformerCfg(
        expand_first=False,
        pool_type=pool_type,
        window_size=window_size,
        init_values=transformer_init_values,
        norm_layer=transformer_norm_layer,
        norm_layer_cl=transformer_norm_layer_cl,
        rel_pos_type=rel_pos_type,
        rel_pos_dim=rel_pos_dim,
    )
    return {'conv_cfg': conv_cfg, 'transformer_cfg': transformer_cfg}
# Architecture definitions keyed by model variant name. Block type codes
# ('C', 'T', 'M', 'PM') are resolved per stage by the stage builder —
# presumably conv, transformer, MaxVit, parallel-MaxVit; confirm in MaxxVitStage.
model_cfgs = dict(
    # Fiddling with configs / defaults / still pretraining
    coatnet_pico_rw_224=MaxxVitCfg(
        embed_dim=(64, 128, 256, 512),
        depths=(2, 3, 5, 2),
        stem_width=(32, 64),
        **_rw_max_cfg(  # using newer max defaults here
            conv_output_bias=True,
            conv_attn_ratio=0.25,
        ),
    ),
    coatnet_nano_rw_224=MaxxVitCfg(
        embed_dim=(64, 128, 256, 512),
        depths=(3, 4, 6, 3),
        stem_width=(32, 64),
        **_rw_max_cfg(  # using newer max defaults here
            stride_mode='pool',
            conv_output_bias=True,
            conv_attn_ratio=0.25,
        ),
    ),
    coatnet_0_rw_224=MaxxVitCfg(
        embed_dim=(96, 192, 384, 768),
        depths=(2, 3, 7, 2),  # deeper than paper '0' model
        stem_width=(32, 64),
        **_rw_coat_cfg(
            conv_attn_early=True,
            transformer_shortcut_bias=False,
        ),
    ),
    coatnet_1_rw_224=MaxxVitCfg(
        embed_dim=(96, 192, 384, 768),
        depths=(2, 6, 14, 2),
        stem_width=(32, 64),
        **_rw_coat_cfg(
            stride_mode='dw',
            conv_attn_early=True,
            transformer_shortcut_bias=False,
        )
    ),
    coatnet_2_rw_224=MaxxVitCfg(
        embed_dim=(128, 256, 512, 1024),
        depths=(2, 6, 14, 2),
        stem_width=(64, 128),
        **_rw_coat_cfg(
            stride_mode='dw',
            conv_attn_act_layer='silu',
            init_values=1e-6,
        ),
    ),
    coatnet_3_rw_224=MaxxVitCfg(
        embed_dim=(192, 384, 768, 1536),
        depths=(2, 6, 14, 2),
        stem_width=(96, 192),
        **_rw_coat_cfg(
            stride_mode='dw',
            conv_attn_act_layer='silu',
            init_values=1e-6,
        ),
    ),

    # Highly experimental configs
    coatnet_bn_0_rw_224=MaxxVitCfg(
        embed_dim=(96, 192, 384, 768),
        depths=(2, 3, 7, 2),  # deeper than paper '0' model
        stem_width=(32, 64),
        **_rw_coat_cfg(
            stride_mode='dw',
            conv_attn_early=True,
            transformer_shortcut_bias=False,
            transformer_norm_layer='batchnorm2d',
        )
    ),
    coatnet_rmlp_nano_rw_224=MaxxVitCfg(
        embed_dim=(64, 128, 256, 512),
        depths=(3, 4, 6, 3),
        stem_width=(32, 64),
        **_rw_max_cfg(
            conv_output_bias=True,
            conv_attn_ratio=0.25,
            rel_pos_type='mlp',
            rel_pos_dim=384,
        ),
    ),
    coatnet_rmlp_0_rw_224=MaxxVitCfg(
        embed_dim=(96, 192, 384, 768),
        depths=(2, 3, 7, 2),  # deeper than paper '0' model
        stem_width=(32, 64),
        **_rw_coat_cfg(
            stride_mode='dw',
            rel_pos_type='mlp',
        ),
    ),
    coatnet_rmlp_1_rw_224=MaxxVitCfg(
        embed_dim=(96, 192, 384, 768),
        depths=(2, 6, 14, 2),
        stem_width=(32, 64),
        **_rw_coat_cfg(
            pool_type='max',
            conv_attn_early=True,
            transformer_shortcut_bias=False,
            rel_pos_type='mlp',
            rel_pos_dim=384,  # was supposed to be 512, woops
        ),
    ),
    coatnet_rmlp_2_rw_224=MaxxVitCfg(
        embed_dim=(128, 256, 512, 1024),
        depths=(2, 6, 14, 2),
        stem_width=(64, 128),
        **_rw_coat_cfg(
            stride_mode='dw',
            conv_attn_act_layer='silu',
            init_values=1e-6,
            rel_pos_type='mlp'
        ),
    ),
    coatnet_rmlp_3_rw_224=MaxxVitCfg(
        embed_dim=(192, 384, 768, 1536),
        depths=(2, 6, 14, 2),
        stem_width=(96, 192),
        **_rw_coat_cfg(
            stride_mode='dw',
            conv_attn_act_layer='silu',
            init_values=1e-6,
            rel_pos_type='mlp'
        ),
    ),
    coatnet_nano_cc_224=MaxxVitCfg(
        embed_dim=(64, 128, 256, 512),
        depths=(3, 4, 6, 3),
        stem_width=(32, 64),
        block_type=('C', 'C', ('C', 'T'), ('C', 'T')),
        **_rw_coat_cfg(),
    ),
    coatnext_nano_rw_224=MaxxVitCfg(
        embed_dim=(64, 128, 256, 512),
        depths=(3, 4, 6, 3),
        stem_width=(32, 64),
        weight_init='normal',
        **_next_cfg(
            rel_pos_type='bias',
            init_values=(1e-5, None)
        ),
    ),

    # Trying to be like the CoAtNet paper configs
    coatnet_0_224=MaxxVitCfg(
        embed_dim=(96, 192, 384, 768),
        depths=(2, 3, 5, 2),
        stem_width=64,
    ),
    coatnet_1_224=MaxxVitCfg(
        embed_dim=(96, 192, 384, 768),
        depths=(2, 6, 14, 2),
        stem_width=64,
    ),
    coatnet_2_224=MaxxVitCfg(
        embed_dim=(128, 256, 512, 1024),
        depths=(2, 6, 14, 2),
        stem_width=128,
    ),
    coatnet_3_224=MaxxVitCfg(
        embed_dim=(192, 384, 768, 1536),
        depths=(2, 6, 14, 2),
        stem_width=192,
    ),
    coatnet_4_224=MaxxVitCfg(
        embed_dim=(192, 384, 768, 1536),
        depths=(2, 12, 28, 2),
        stem_width=192,
    ),
    coatnet_5_224=MaxxVitCfg(
        embed_dim=(256, 512, 1280, 2048),
        depths=(2, 12, 28, 2),
        stem_width=192,
    ),

    # Experimental MaxVit configs
    maxvit_pico_rw_256=MaxxVitCfg(
        embed_dim=(32, 64, 128, 256),
        depths=(2, 2, 5, 2),
        block_type=('M',) * 4,
        stem_width=(24, 32),
        **_rw_max_cfg(),
    ),
    maxvit_nano_rw_256=MaxxVitCfg(
        embed_dim=(64, 128, 256, 512),
        depths=(1, 2, 3, 1),
        block_type=('M',) * 4,
        stem_width=(32, 64),
        **_rw_max_cfg(),
    ),
    maxvit_tiny_rw_224=MaxxVitCfg(
        embed_dim=(64, 128, 256, 512),
        depths=(2, 2, 5, 2),
        block_type=('M',) * 4,
        stem_width=(32, 64),
        **_rw_max_cfg(),
    ),
    maxvit_tiny_rw_256=MaxxVitCfg(
        embed_dim=(64, 128, 256, 512),
        depths=(2, 2, 5, 2),
        block_type=('M',) * 4,
        stem_width=(32, 64),
        **_rw_max_cfg(),
    ),
    maxvit_rmlp_pico_rw_256=MaxxVitCfg(
        embed_dim=(32, 64, 128, 256),
        depths=(2, 2, 5, 2),
        block_type=('M',) * 4,
        stem_width=(24, 32),
        **_rw_max_cfg(rel_pos_type='mlp'),
    ),
    maxvit_rmlp_nano_rw_256=MaxxVitCfg(
        embed_dim=(64, 128, 256, 512),
        depths=(1, 2, 3, 1),
        block_type=('M',) * 4,
        stem_width=(32, 64),
        **_rw_max_cfg(rel_pos_type='mlp'),
    ),
    maxvit_rmlp_tiny_rw_256=MaxxVitCfg(
        embed_dim=(64, 128, 256, 512),
        depths=(2, 2, 5, 2),
        block_type=('M',) * 4,
        stem_width=(32, 64),
        **_rw_max_cfg(rel_pos_type='mlp'),
    ),
    maxvit_rmlp_small_rw_224=MaxxVitCfg(
        embed_dim=(96, 192, 384, 768),
        depths=(2, 2, 5, 2),
        block_type=('M',) * 4,
        stem_width=(32, 64),
        **_rw_max_cfg(
            rel_pos_type='mlp',
            init_values=1e-6,
        ),
    ),
    maxvit_rmlp_small_rw_256=MaxxVitCfg(
        embed_dim=(96, 192, 384, 768),
        depths=(2, 2, 5, 2),
        block_type=('M',) * 4,
        stem_width=(32, 64),
        **_rw_max_cfg(
            rel_pos_type='mlp',
            init_values=1e-6,
        ),
    ),
    maxvit_tiny_pm_256=MaxxVitCfg(
        embed_dim=(64, 128, 256, 512),
        depths=(2, 2, 5, 2),
        block_type=('PM',) * 4,
        stem_width=(32, 64),
        **_rw_max_cfg(),
    ),
    maxxvit_rmlp_nano_rw_256=MaxxVitCfg(
        embed_dim=(64, 128, 256, 512),
        depths=(1, 2, 3, 1),
        block_type=('M',) * 4,
        stem_width=(32, 64),
        weight_init='normal',
        **_next_cfg(),
    ),
    maxxvit_rmlp_tiny_rw_256=MaxxVitCfg(
        embed_dim=(64, 128, 256, 512),
        depths=(2, 2, 5, 2),
        block_type=('M',) * 4,
        stem_width=(32, 64),
        **_next_cfg(),
    ),
    maxxvit_rmlp_small_rw_256=MaxxVitCfg(
        embed_dim=(96, 192, 384, 768),
        depths=(2, 2, 5, 2),
        block_type=('M',) * 4,
        stem_width=(48, 96),
        **_next_cfg(),
    ),

    # Trying to be like the MaxViT paper configs
    maxvit_tiny_224=MaxxVitCfg(
        embed_dim=(64, 128, 256, 512),
        depths=(2, 2, 5, 2),
        block_type=('M',) * 4,
        stem_width=64,
    ),
    maxvit_small_224=MaxxVitCfg(
        embed_dim=(96, 192, 384, 768),
        depths=(2, 2, 5, 2),
        block_type=('M',) * 4,
        stem_width=64,
    ),
    maxvit_base_224=MaxxVitCfg(
        embed_dim=(96, 192, 384, 768),
        depths=(2, 6, 14, 2),
        block_type=('M',) * 4,
        stem_width=64,
    ),
    maxvit_large_224=MaxxVitCfg(
        embed_dim=(128, 256, 512, 1024),
        depths=(2, 6, 14, 2),
        block_type=('M',) * 4,
        stem_width=128,
    ),
    maxvit_xlarge_224=MaxxVitCfg(
        embed_dim=(192, 384, 768, 1536),
        depths=(2, 6, 14, 2),
        block_type=('M',) * 4,
        stem_width=192,
    ),
)
class Attention2d(nn.Module):
    """ multi-head attention for 2D NCHW tensors"""
    def __init__(
            self,
            dim: int,
            dim_out: Optional[int] = None,
            dim_head: int = 32,
            bias: bool = True,
            expand_first: bool = True,
            rel_pos_cls: Callable = None,
            attn_drop: float = 0.,
            proj_drop: float = 0.
    ):
        """
        Args:
            dim: Input channels.
            dim_out: Output channels (defaults to ``dim``).
            dim_head: Channels per attention head.
            bias: Add bias to the qkv and output projections.
            expand_first: Run attention at ``dim_out`` width (vs ``dim``).
            rel_pos_cls: Optional factory for a relative position bias module.
            attn_drop: Dropout rate on attention weights.
            proj_drop: Dropout rate after the output projection.
        """
        super().__init__()
        dim_out = dim_out or dim
        dim_attn = dim_out if expand_first else dim
        # Match AttentionCl: the attention width must split evenly into heads,
        # otherwise channels would be silently dropped by the integer division.
        assert dim_attn % dim_head == 0, 'attn dim should be divisible by head_dim'
        self.num_heads = dim_attn // dim_head
        self.dim_head = dim_head
        self.scale = dim_head ** -0.5
        self.qkv = nn.Conv2d(dim, dim_attn * 3, 1, bias=bias)
        self.rel_pos = rel_pos_cls(num_heads=self.num_heads) if rel_pos_cls else None
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Conv2d(dim_attn, dim_out, 1, bias=bias)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None):
        B, C, H, W = x.shape
        # qkv -> (B, heads, 3 * dim_head, H*W) then chunk into q, k, v.
        q, k, v = self.qkv(x).view(B, self.num_heads, self.dim_head * 3, -1).chunk(3, dim=2)

        attn = (q.transpose(-2, -1) @ k) * self.scale  # (B, heads, HW, HW)
        if self.rel_pos is not None:
            attn = self.rel_pos(attn)
        elif shared_rel_pos is not None:
            attn = attn + shared_rel_pos
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        # Weighted sum over values, restored to NCHW layout.
        x = (v @ attn.transpose(-2, -1)).view(B, -1, H, W)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
class AttentionCl(nn.Module):
    """ Channels-last multi-head attention (B, ..., C) """

    def __init__(
            self,
            dim: int,
            dim_out: Optional[int] = None,
            dim_head: int = 32,
            bias: bool = True,
            expand_first: bool = True,
            rel_pos_cls: Callable = None,
            attn_drop: float = 0.,
            proj_drop: float = 0.
    ):
        """
        Args:
            dim: Input channels (last axis).
            dim_out: Output channels (defaults to ``dim``).
            dim_head: Channels per attention head.
            bias: Add bias to the qkv and output projections.
            expand_first: Run attention at ``dim_out`` width when expanding.
            rel_pos_cls: Optional factory for a relative position bias module.
            attn_drop: Dropout rate on attention weights.
            proj_drop: Dropout rate after the output projection.
        """
        super().__init__()
        dim_out = dim_out or dim
        # Only expand the attention width when dim_out is actually larger.
        dim_attn = dim_out if expand_first and dim_out > dim else dim
        assert dim_attn % dim_head == 0, 'attn dim should be divisible by head_dim'
        self.num_heads = dim_attn // dim_head
        self.dim_head = dim_head
        self.scale = dim_head ** -0.5
        self.qkv = nn.Linear(dim, dim_attn * 3, bias=bias)
        self.rel_pos = rel_pos_cls(num_heads=self.num_heads) if rel_pos_cls else None
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim_attn, dim_out, bias=bias)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None):
        B = x.shape[0]
        # All leading dims (e.g. windows x tokens) are restored at the end.
        restore_shape = x.shape[:-1]
        # qkv -> (B, tokens, heads, 3 * dim_head) -> (B, heads, tokens, ...) -> q, k, v.
        q, k, v = self.qkv(x).view(B, -1, self.num_heads, self.dim_head * 3).transpose(1, 2).chunk(3, dim=3)

        attn = (q @ k.transpose(-2, -1)) * self.scale
        if self.rel_pos is not None:
            attn = self.rel_pos(attn, shared_rel_pos=shared_rel_pos)
        elif shared_rel_pos is not None:
            attn = attn + shared_rel_pos
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(restore_shape + (-1,))
        x = self.proj(x)
        x = self.proj_drop(x)
        return x
class LayerScale(nn.Module):
    """Learnable per-channel scaling for channels-last tensors."""

    def __init__(self, dim, init_values=1e-5, inplace=False):
        super().__init__()
        self.inplace = inplace
        self.gamma = nn.Parameter(init_values * torch.ones(dim))

    def forward(self, x):
        if self.inplace:
            return x.mul_(self.gamma)
        return x * self.gamma
class LayerScale2d(nn.Module):
    """Learnable per-channel scaling for NCHW tensors."""

    def __init__(self, dim, init_values=1e-5, inplace=False):
        super().__init__()
        self.inplace = inplace
        self.gamma = nn.Parameter(init_values * torch.ones(dim))

    def forward(self, x):
        # Broadcast the per-channel scale over the spatial dims.
        scale = self.gamma.view(1, -1, 1, 1)
        if self.inplace:
            return x.mul_(scale)
        return x * scale
class Downsample2d(nn.Module):
    """2x spatial downsampling with an optional 1x1 channel expansion.

    Supported ``pool_type`` values:
    * 'max'  - MaxPool2d w/ kernel_size 3, stride 2, padding 1
    * 'max2' - MaxPool2d w/ kernel_size = stride = 2
    * 'avg'  - AvgPool2d w/ kernel_size 3, stride 2, padding 1
    * 'avg2' - AvgPool2d w/ kernel_size = stride = 2
    """

    def __init__(
            self,
            dim: int,
            dim_out: int,
            pool_type: str = 'avg2',
            bias: bool = True,
    ):
        super().__init__()
        # Dispatch table: pool_type -> pooling layer factory.
        pool_factories = {
            'max': lambda: nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
            'max2': lambda: nn.MaxPool2d(2),  # kernel_size == stride == 2
            'avg': lambda: nn.AvgPool2d(kernel_size=3, stride=2, padding=1, count_include_pad=False),
            'avg2': lambda: nn.AvgPool2d(2),  # kernel_size == stride == 2
        }
        assert pool_type in pool_factories
        self.pool = pool_factories[pool_type]()
        # 1x1 conv only when a channel change is required.
        self.expand = nn.Conv2d(dim, dim_out, 1, bias=bias) if dim != dim_out else nn.Identity()

    def forward(self, x):
        # Spatial downsample, then (optional) channel expansion.
        return self.expand(self.pool(x))
def _init_transformer(module, name, scheme=''):
if isinstance(module, (nn.Conv2d, nn.Linear)):
if scheme == 'normal':
nn.init.normal_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif scheme == 'trunc_normal':
trunc_normal_tf_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif scheme == 'xavier_normal':
nn.init.xavier_normal_(module.weight)
if module.bias is not None:
nn.init.zeros_(module.bias)
else:
# vit like
nn.init.xavier_uniform_(module.weight)
if module.bias is not None:
if 'mlp' in name:
nn.init.normal_(module.bias, std=1e-6)
else:
nn.init.zeros_(module.bias)
class TransformerBlock2d(nn.Module):
    """ Transformer block with 2D downsampling
    '2D' NCHW tensor layout

    Some gains can be seen on GPU using a 1D / CL block, BUT w/ the need to switch back/forth to NCHW
    for spatial pooling, the benefit is minimal so ended up using just this variant for CoAt configs.

    This impl was faster on TPU w/ PT XLA than the 1D experiment.
    """

    def __init__(
            self,
            dim: int,
            dim_out: int,
            stride: int = 1,
            rel_pos_cls: Callable = None,
            cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),  # NOTE: default cfg instance is shared across calls; treated as read-only
            drop_path: float = 0.,
    ):
        super().__init__()
        norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps)
        act_layer = get_act_layer(cfg.act_layer)

        if stride == 2:
            # Downsampling block: the shortcut pools (and expands chs); the main
            # path pools after the first norm so attention runs at the reduced size.
            self.shortcut = Downsample2d(dim, dim_out, pool_type=cfg.pool_type, bias=cfg.shortcut_bias)
            self.norm1 = nn.Sequential(OrderedDict([
                ('norm', norm_layer(dim)),
                ('down', Downsample2d(dim, dim, pool_type=cfg.pool_type)),
            ]))
        else:
            assert dim == dim_out
            self.shortcut = nn.Identity()
            self.norm1 = norm_layer(dim)

        self.attn = Attention2d(
            dim,
            dim_out,
            dim_head=cfg.dim_head,
            expand_first=cfg.expand_first,
            bias=cfg.attn_bias,
            rel_pos_cls=rel_pos_cls,
            attn_drop=cfg.attn_drop,
            proj_drop=cfg.proj_drop
        )
        # LayerScale only when cfg.init_values is set.
        self.ls1 = LayerScale2d(dim_out, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
        self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()

        self.norm2 = norm_layer(dim_out)
        self.mlp = ConvMlp(
            in_features=dim_out,
            hidden_features=int(dim_out * cfg.expand_ratio),
            act_layer=act_layer,
            drop=cfg.proj_drop)
        self.ls2 = LayerScale2d(dim_out, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
        self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()

    def init_weights(self, scheme=''):
        # Apply transformer-style init to all conv/linear sub-modules.
        named_apply(partial(_init_transformer, scheme=scheme), self)

    def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None):
        # Pre-norm attention with residual, then pre-norm MLP with residual.
        x = self.shortcut(x) + self.drop_path1(self.ls1(self.attn(self.norm1(x), shared_rel_pos=shared_rel_pos)))
        x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
        return x
def _init_conv(module, name, scheme=''):
if isinstance(module, nn.Conv2d):
if scheme == 'normal':
nn.init.normal_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif scheme == 'trunc_normal':
trunc_normal_tf_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif scheme == 'xavier_normal':
nn.init.xavier_normal_(module.weight)
if module.bias is not None:
nn.init.zeros_(module.bias)
else:
# efficientnet like
fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
fan_out //= module.groups
nn.init.normal_(module.weight, 0, math.sqrt(2.0 / fan_out))
if module.bias is not None:
nn.init.zeros_(module.bias)
def num_groups(group_size, channels):
    """Convert a per-group channel count into an nn.Conv2d ``groups`` value.

    ``group_size`` of 0/None means a standard convolution (one group);
    ``group_size == 1`` yields a depthwise convolution.
    """
    if not group_size:  # 0 or None -> normal conv with a single group
        return 1
    groups, remainder = divmod(channels, group_size)
    assert remainder == 0
    return groups
class MbConvBlock(nn.Module):
""" Pre-Norm Conv Block - 1x1 - kxk - 1x1, w/ inverted bottleneck (expand)
"""
def __init__(
self,
in_chs: int,
out_chs: int,
stride: int = 1,
dilation: Tuple[int, int] = (1, 1),
cfg: MaxxVitConvCfg = MaxxVitConvCfg(),
drop_path: float = 0.
):
super(MbConvBlock, self).__init__()
norm_act_layer = partial(get_norm_act_layer(cfg.norm_layer, cfg.act_layer), eps=cfg.norm_eps)
mid_chs = make_divisible((out_chs if cfg.expand_output else in_chs) * cfg.expand_ratio)
groups = num_groups(cfg.group_size, mid_chs)
if stride == 2:
self.shortcut = Downsample2d(in_chs, out_chs, pool_type=cfg.pool_type, bias=cfg.output_bias)
else:
self.shortcut = nn.Identity()
assert cfg.stride_mode in ('pool', '1x1', 'dw')
stride_pool, stride_1, stride_2 = 1, 1, 1
if cfg.stride_mode == 'pool':
# NOTE this is not described in paper, experiment to find faster option that doesn't stride in 1x1
stride_pool, dilation_2 = stride, dilation[1]
# FIXME handle dilation of avg pool
elif cfg.stride_mode == '1x1':
# NOTE I don't like this option described in paper, 1x1 w/ stride throws info away
stride_1, dilation_2 = stride, dilation[1]
else:
stride_2, dilation_2 = stride, dilation[0]
self.pre_norm = norm_act_layer(in_chs, apply_act=cfg.pre_norm_act)
if stride_pool > 1:
self.down = Downsample2d(in_chs, in_chs, pool_type=cfg.downsample_pool_type)
else:
self.down = nn.Identity()
self.conv1_1x1 = create_conv2d(in_chs, mid_chs, 1, stride=stride_1)
self.norm1 = norm_act_layer(mid_chs)
self.conv2_kxk = create_conv2d(
mid_chs, mid_chs, cfg.kernel_size, stride=stride_2, dilation=dilation_2, groups=groups)
attn_kwargs = {}
if isinstance(cfg.attn_layer, str):
if cfg.attn_layer == 'se' or cfg.attn_layer == 'eca':
attn_kwargs['act_layer'] = cfg.attn_act_layer
attn_kwargs['rd_channels'] = int(cfg.attn_ratio * (out_chs if cfg.expand_output else mid_chs))
# two different orderings for SE and norm2 (due to some weights and trials using SE before norm2)
if cfg.attn_early:
self.se_early = create_attn(cfg.attn_layer, mid_chs, **attn_kwargs)
self.norm2 = norm_act_layer(mid_chs)
self.se = None
else:
self.se_early = None
self.norm2 = norm_act_layer(mid_chs)
self.se = create_attn(cfg.attn_layer, mid_chs, **attn_kwargs)
self.conv3_1x1 = create_conv2d(mid_chs, out_chs, 1, bias=cfg.output_bias)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
    def init_weights(self, scheme=''):
        """Apply the named conv init scheme to all child modules via _init_conv."""
        named_apply(partial(_init_conv, scheme=scheme), self)
    def forward(self, x):
        """Pre-norm MBConv: 1x1 expand -> kxk depthwise/grouped (+SE) -> 1x1 project, residual add."""
        shortcut = self.shortcut(x)
        x = self.pre_norm(x)
        x = self.down(x)

        # 1x1 expansion conv & norm-act
        x = self.conv1_1x1(x)
        x = self.norm1(x)

        # depthwise / grouped 3x3 conv w/ SE (or other) channel attention & norm-act
        x = self.conv2_kxk(x)
        # only one of se_early / se is non-None (chosen in __init__ by cfg.attn_early)
        if self.se_early is not None:
            x = self.se_early(x)
        x = self.norm2(x)
        if self.se is not None:
            x = self.se(x)

        # 1x1 linear projection to output width
        x = self.conv3_1x1(x)
        x = self.drop_path(x) + shortcut
        return x
class ConvNeXtBlock(nn.Module):
    """ ConvNeXt Block

    Depthwise conv -> norm -> MLP with a residual connection. Supports a
    downsampling shortcut (stride 2) or a 1x1 projection shortcut on channel
    change, and either a channels-first ConvMlp or a channels-last (permuted)
    Mlp path depending on `conv_mlp`.
    """

    def __init__(
            self,
            in_chs: int,
            out_chs: Optional[int] = None,
            kernel_size: int = 7,
            stride: int = 1,
            dilation: Tuple[int, int] = (1, 1),
            cfg: MaxxVitConvCfg = MaxxVitConvCfg(),
            conv_mlp: bool = True,
            drop_path: float = 0.
    ):
        super().__init__()
        out_chs = out_chs or in_chs
        act_layer = get_act_layer(cfg.act_layer)
        if conv_mlp:
            norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps)
            mlp_layer = ConvMlp
        else:
            # channels-last path requires a LayerNorm-family norm
            assert 'layernorm' in cfg.norm_layer
            norm_layer = LayerNorm
            mlp_layer = Mlp
        self.use_conv_mlp = conv_mlp

        # shortcut: downsample on stride 2, 1x1 projection on channel change, else identity
        if stride == 2:
            self.shortcut = Downsample2d(in_chs, out_chs)
        elif in_chs != out_chs:
            self.shortcut = nn.Conv2d(in_chs, out_chs, kernel_size=1, bias=cfg.output_bias)
        else:
            self.shortcut = nn.Identity()

        assert cfg.stride_mode in ('pool', 'dw')
        stride_pool, stride_dw = 1, 1
        # FIXME handle dilation?
        if cfg.stride_mode == 'pool':
            stride_pool = stride
        else:
            stride_dw = stride

        if stride_pool == 2:
            self.down = Downsample2d(in_chs, in_chs, pool_type=cfg.downsample_pool_type)
        else:
            self.down = nn.Identity()

        self.conv_dw = create_conv2d(
            in_chs, out_chs, kernel_size=kernel_size, stride=stride_dw, dilation=dilation[1],
            depthwise=True, bias=cfg.output_bias)
        self.norm = norm_layer(out_chs)
        self.mlp = mlp_layer(out_chs, int(cfg.expand_ratio * out_chs), bias=cfg.output_bias, act_layer=act_layer)
        if conv_mlp:
            self.ls = LayerScale2d(out_chs, cfg.init_values) if cfg.init_values else nn.Identity()
        else:
            self.ls = LayerScale(out_chs, cfg.init_values) if cfg.init_values else nn.Identity()
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()

    def forward(self, x):
        shortcut = self.shortcut(x)
        x = self.down(x)
        x = self.conv_dw(x)
        if self.use_conv_mlp:
            # channels-first: norm / mlp / layer-scale all operate on NCHW directly
            x = self.norm(x)
            x = self.mlp(x)
            x = self.ls(x)
        else:
            # channels-last: permute to NHWC for LayerNorm / Mlp, then back
            x = x.permute(0, 2, 3, 1)
            x = self.norm(x)
            x = self.mlp(x)
            x = self.ls(x)
            x = x.permute(0, 3, 1, 2)

        x = self.drop_path(x) + shortcut
        return x
def window_partition(x, window_size: List[int]):
    """Partition a channels-last tensor into non-overlapping windows.

    Args:
        x: Input tensor of shape (B, H, W, C).
        window_size: (window_h, window_w); H and W must be divisible by them.

    Returns:
        Tensor of shape (B * num_windows, window_h, window_w, C).
    """
    B, H, W, C = x.shape
    _assert(H % window_size[0] == 0, f'height ({H}) must be divisible by window ({window_size[0]})')
    # fix: width check previously raised with an empty message
    _assert(W % window_size[1] == 0, f'width ({W}) must be divisible by window ({window_size[1]})')
    x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C)
    windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C)
    return windows
@register_notrace_function  # reason: int argument is a Proxy
def window_reverse(windows, window_size: List[int], img_size: List[int]):
    """Inverse of window_partition: reassemble windows into a (B, H, W, C) tensor."""
    H, W = img_size
    C = windows.shape[-1]
    x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C)
    return x
def grid_partition(x, grid_size: List[int]):
    """Partition a channels-last tensor into a dilated ('grid') arrangement.

    Unlike window_partition, each output cell gathers elements strided across
    the whole image rather than a contiguous window.

    Args:
        x: Input tensor of shape (B, H, W, C).
        grid_size: (grid_h, grid_w); H and W must be divisible by them.

    Returns:
        Tensor of shape (B * num_cells, grid_h, grid_w, C).
    """
    B, H, W, C = x.shape
    _assert(H % grid_size[0] == 0, f'height {H} must be divisible by grid {grid_size[0]}')
    # fix: width check previously raised with an empty message
    _assert(W % grid_size[1] == 0, f'width {W} must be divisible by grid {grid_size[1]}')
    x = x.view(B, grid_size[0], H // grid_size[0], grid_size[1], W // grid_size[1], C)
    windows = x.permute(0, 2, 4, 1, 3, 5).contiguous().view(-1, grid_size[0], grid_size[1], C)
    return windows
@register_notrace_function  # reason: int argument is a Proxy
def grid_reverse(windows, grid_size: List[int], img_size: List[int]):
    """Inverse of grid_partition: reassemble grid cells into a (B, H, W, C) tensor."""
    H, W = img_size
    C = windows.shape[-1]
    x = windows.view(-1, H // grid_size[0], W // grid_size[1], grid_size[0], grid_size[1], C)
    x = x.permute(0, 3, 1, 4, 2, 5).contiguous().view(-1, H, W, C)
    return x
def get_rel_pos_cls(cfg: MaxxVitTransformerCfg, window_size):
    """Resolve the relative-position module factory for the given window size.

    Returns a partial constructing RelPosMlp or RelPosBias per cfg.rel_pos_type,
    or None when no relative position bias is configured.
    """
    if cfg.rel_pos_type == 'mlp':
        return partial(RelPosMlp, window_size=window_size, hidden_dim=cfg.rel_pos_dim)
    if cfg.rel_pos_type == 'bias':
        return partial(RelPosBias, window_size=window_size)
    return None
class PartitionAttentionCl(nn.Module):
    """ Grid or Block partition + Attn + FFN.
    NxC 'channels last' tensor layout.

    Applies windowed ('block') or dilated-grid ('grid') self-attention followed
    by an MLP, each as a pre-norm residual branch with optional LayerScale and
    DropPath.
    """

    def __init__(
            self,
            dim: int,
            partition_type: str = 'block',
            cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),
            drop_path: float = 0.,
    ):
        super().__init__()
        norm_layer = partial(get_norm_layer(cfg.norm_layer_cl), eps=cfg.norm_eps)  # NOTE this block is channels-last
        act_layer = get_act_layer(cfg.act_layer)

        self.partition_block = partition_type == 'block'
        self.partition_size = to_2tuple(cfg.window_size if self.partition_block else cfg.grid_size)
        rel_pos_cls = get_rel_pos_cls(cfg, self.partition_size)

        self.norm1 = norm_layer(dim)
        self.attn = AttentionCl(
            dim,
            dim,
            dim_head=cfg.dim_head,
            bias=cfg.attn_bias,
            rel_pos_cls=rel_pos_cls,
            attn_drop=cfg.attn_drop,
            proj_drop=cfg.proj_drop,
        )
        self.ls1 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
        self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()

        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(
            in_features=dim,
            hidden_features=int(dim * cfg.expand_ratio),
            act_layer=act_layer,
            drop=cfg.proj_drop)
        self.ls2 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
        self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()

    def _partition_attn(self, x):
        # partition NHWC input into windows (block) or grid cells, attend, reassemble
        img_size = x.shape[1:3]
        if self.partition_block:
            partitioned = window_partition(x, self.partition_size)
        else:
            partitioned = grid_partition(x, self.partition_size)

        partitioned = self.attn(partitioned)

        if self.partition_block:
            x = window_reverse(partitioned, self.partition_size, img_size)
        else:
            x = grid_reverse(partitioned, self.partition_size, img_size)
        return x

    def forward(self, x):
        # pre-norm residuals: attention branch, then MLP branch
        x = x + self.drop_path1(self.ls1(self._partition_attn(self.norm1(x))))
        x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
        return x
class ParallelPartitionAttention(nn.Module):
    """ Experimental. Grid and Block partition + single FFN
    NxC tensor layout.

    Runs block (window) and grid attention in parallel, each producing dim // 2
    channels, concatenates them back to full width, then applies one shared MLP.
    """

    def __init__(
            self,
            dim: int,
            cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),
            drop_path: float = 0.,
    ):
        super().__init__()
        assert dim % 2 == 0  # each attention branch outputs half the channels
        norm_layer = partial(get_norm_layer(cfg.norm_layer_cl), eps=cfg.norm_eps)  # NOTE this block is channels-last
        act_layer = get_act_layer(cfg.act_layer)

        assert cfg.window_size == cfg.grid_size  # both branches share one partition size
        self.partition_size = to_2tuple(cfg.window_size)
        rel_pos_cls = get_rel_pos_cls(cfg, self.partition_size)

        self.norm1 = norm_layer(dim)
        self.attn_block = AttentionCl(
            dim,
            dim // 2,
            dim_head=cfg.dim_head,
            bias=cfg.attn_bias,
            rel_pos_cls=rel_pos_cls,
            attn_drop=cfg.attn_drop,
            proj_drop=cfg.proj_drop,
        )
        self.attn_grid = AttentionCl(
            dim,
            dim // 2,
            dim_head=cfg.dim_head,
            bias=cfg.attn_bias,
            rel_pos_cls=rel_pos_cls,
            attn_drop=cfg.attn_drop,
            proj_drop=cfg.proj_drop,
        )
        self.ls1 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
        self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()

        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(
            in_features=dim,
            hidden_features=int(dim * cfg.expand_ratio),
            out_features=dim,
            act_layer=act_layer,
            drop=cfg.proj_drop)
        self.ls2 = LayerScale(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
        self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()

    def _partition_attn(self, x):
        img_size = x.shape[1:3]

        # block (window) branch
        partitioned_block = window_partition(x, self.partition_size)
        partitioned_block = self.attn_block(partitioned_block)
        x_window = window_reverse(partitioned_block, self.partition_size, img_size)

        # grid branch
        partitioned_grid = grid_partition(x, self.partition_size)
        partitioned_grid = self.attn_grid(partitioned_grid)
        x_grid = grid_reverse(partitioned_grid, self.partition_size, img_size)

        # concatenate the two half-width branches back to full width
        return torch.cat([x_window, x_grid], dim=-1)

    def forward(self, x):
        x = x + self.drop_path1(self.ls1(self._partition_attn(self.norm1(x))))
        x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
        return x
def window_partition_nchw(x, window_size: List[int]):
    """Partition a channels-first tensor into non-overlapping windows.

    Args:
        x: Input tensor of shape (B, C, H, W).
        window_size: (window_h, window_w); H and W must be divisible by them.

    Returns:
        Tensor of shape (B * num_windows, C, window_h, window_w).
    """
    B, C, H, W = x.shape
    _assert(H % window_size[0] == 0, f'height ({H}) must be divisible by window ({window_size[0]})')
    # fix: width check previously raised with an empty message
    _assert(W % window_size[1] == 0, f'width ({W}) must be divisible by window ({window_size[1]})')
    x = x.view(B, C, H // window_size[0], window_size[0], W // window_size[1], window_size[1])
    windows = x.permute(0, 2, 4, 1, 3, 5).contiguous().view(-1, C, window_size[0], window_size[1])
    return windows
@register_notrace_function  # reason: int argument is a Proxy
def window_reverse_nchw(windows, window_size: List[int], img_size: List[int]):
    """Inverse of window_partition_nchw: reassemble windows into a (B, C, H, W) tensor."""
    H, W = img_size
    C = windows.shape[1]
    x = windows.view(-1, H // window_size[0], W // window_size[1], C, window_size[0], window_size[1])
    x = x.permute(0, 3, 1, 4, 2, 5).contiguous().view(-1, C, H, W)
    return x
def grid_partition_nchw(x, grid_size: List[int]):
    """Partition a channels-first tensor into a dilated ('grid') arrangement.

    Args:
        x: Input tensor of shape (B, C, H, W).
        grid_size: (grid_h, grid_w); H and W must be divisible by them.

    Returns:
        Tensor of shape (B * num_cells, C, grid_h, grid_w).
    """
    B, C, H, W = x.shape
    _assert(H % grid_size[0] == 0, f'height {H} must be divisible by grid {grid_size[0]}')
    # fix: width check previously raised with an empty message
    _assert(W % grid_size[1] == 0, f'width {W} must be divisible by grid {grid_size[1]}')
    x = x.view(B, C, grid_size[0], H // grid_size[0], grid_size[1], W // grid_size[1])
    windows = x.permute(0, 3, 5, 1, 2, 4).contiguous().view(-1, C, grid_size[0], grid_size[1])
    return windows
@register_notrace_function  # reason: int argument is a Proxy
def grid_reverse_nchw(windows, grid_size: List[int], img_size: List[int]):
    """Inverse of grid_partition_nchw: reassemble grid cells into a (B, C, H, W) tensor."""
    H, W = img_size
    C = windows.shape[1]
    x = windows.view(-1, H // grid_size[0], W // grid_size[1], C, grid_size[0], grid_size[1])
    x = x.permute(0, 3, 4, 1, 5, 2).contiguous().view(-1, C, H, W)
    return x
class PartitionAttention2d(nn.Module):
    """ Grid or Block partition + Attn + FFN
    '2D' NCHW tensor layout.

    NCHW counterpart of PartitionAttentionCl: windowed ('block') or grid
    self-attention + MLP as pre-norm residual branches, without NHWC permutes.
    """

    def __init__(
            self,
            dim: int,
            partition_type: str = 'block',
            cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),
            drop_path: float = 0.,
    ):
        super().__init__()
        norm_layer = partial(get_norm_layer(cfg.norm_layer), eps=cfg.norm_eps)  # NOTE this block is NCHW, not channels-last
        act_layer = get_act_layer(cfg.act_layer)

        self.partition_block = partition_type == 'block'
        self.partition_size = to_2tuple(cfg.window_size if self.partition_block else cfg.grid_size)
        rel_pos_cls = get_rel_pos_cls(cfg, self.partition_size)

        self.norm1 = norm_layer(dim)
        self.attn = Attention2d(
            dim,
            dim,
            dim_head=cfg.dim_head,
            bias=cfg.attn_bias,
            rel_pos_cls=rel_pos_cls,
            attn_drop=cfg.attn_drop,
            proj_drop=cfg.proj_drop,
        )
        self.ls1 = LayerScale2d(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
        self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()

        self.norm2 = norm_layer(dim)
        self.mlp = ConvMlp(
            in_features=dim,
            hidden_features=int(dim * cfg.expand_ratio),
            act_layer=act_layer,
            drop=cfg.proj_drop)
        self.ls2 = LayerScale2d(dim, init_values=cfg.init_values) if cfg.init_values else nn.Identity()
        self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()

    def _partition_attn(self, x):
        # partition NCHW input into windows (block) or grid cells, attend, reassemble
        img_size = x.shape[-2:]
        if self.partition_block:
            partitioned = window_partition_nchw(x, self.partition_size)
        else:
            partitioned = grid_partition_nchw(x, self.partition_size)

        partitioned = self.attn(partitioned)

        if self.partition_block:
            x = window_reverse_nchw(partitioned, self.partition_size, img_size)
        else:
            x = grid_reverse_nchw(partitioned, self.partition_size, img_size)
        return x

    def forward(self, x):
        # pre-norm residuals: attention branch, then MLP branch
        x = x + self.drop_path1(self.ls1(self._partition_attn(self.norm1(x))))
        x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
        return x
class MaxxVitBlock(nn.Module):
    """ MaxVit conv, window partition + FFN , grid partition + FFN

    One MaxVit unit: conv block (MBConv or ConvNeXt per conv_cfg.block_type),
    then block (window) attention, then grid attention. Attention runs in NHWC
    by default, or NCHW when `use_nchw_attn` is set (skips the permutes).
    """

    def __init__(
            self,
            dim: int,
            dim_out: int,
            stride: int = 1,
            conv_cfg: MaxxVitConvCfg = MaxxVitConvCfg(),
            transformer_cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),
            use_nchw_attn: bool = False,  # FIXME move to cfg? True is ~20-30% faster on TPU, 5-10% slower on GPU
            drop_path: float = 0.,
    ):
        super().__init__()

        conv_cls = ConvNeXtBlock if conv_cfg.block_type == 'convnext' else MbConvBlock
        self.conv = conv_cls(dim, dim_out, stride=stride, cfg=conv_cfg, drop_path=drop_path)

        attn_kwargs = dict(dim=dim_out, cfg=transformer_cfg, drop_path=drop_path)
        partition_layer = PartitionAttention2d if use_nchw_attn else PartitionAttentionCl
        self.nchw_attn = use_nchw_attn
        self.attn_block = partition_layer(**attn_kwargs)
        self.attn_grid = partition_layer(partition_type='grid', **attn_kwargs)

    def init_weights(self, scheme=''):
        """Initialize attention and conv sub-modules with the named scheme."""
        named_apply(partial(_init_transformer, scheme=scheme), self.attn_block)
        named_apply(partial(_init_transformer, scheme=scheme), self.attn_grid)
        named_apply(partial(_init_conv, scheme=scheme), self.conv)

    def forward(self, x):
        # NCHW format
        x = self.conv(x)

        if not self.nchw_attn:
            x = x.permute(0, 2, 3, 1)  # to NHWC (channels-last)
        x = self.attn_block(x)
        x = self.attn_grid(x)
        if not self.nchw_attn:
            x = x.permute(0, 3, 1, 2)  # back to NCHW
        return x
class ParallelMaxxVitBlock(nn.Module):
    """ MaxVit block with parallel cat(window + grid), one FF
    Experimental timm block.

    Stacks `num_conv` conv blocks (MBConv or ConvNeXt) followed by a
    ParallelPartitionAttention that runs window and grid attention in parallel.
    """

    def __init__(
            self,
            dim,
            dim_out,
            stride=1,
            num_conv=2,
            conv_cfg: MaxxVitConvCfg = MaxxVitConvCfg(),
            transformer_cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),
            drop_path=0.,
    ):
        super().__init__()

        conv_cls = ConvNeXtBlock if conv_cfg.block_type == 'convnext' else MbConvBlock
        if num_conv > 1:
            # first conv handles stride / channel change, the rest keep shape
            convs = [conv_cls(dim, dim_out, stride=stride, cfg=conv_cfg, drop_path=drop_path)]
            # fix: previously `[module] * (num_conv - 1)` repeated the SAME module
            # instance, silently sharing weights whenever num_conv > 2; build
            # independent instances instead
            convs += [conv_cls(dim_out, dim_out, cfg=conv_cfg, drop_path=drop_path) for _ in range(num_conv - 1)]
            self.conv = nn.Sequential(*convs)
        else:
            self.conv = conv_cls(dim, dim_out, stride=stride, cfg=conv_cfg, drop_path=drop_path)
        self.attn = ParallelPartitionAttention(dim=dim_out, cfg=transformer_cfg, drop_path=drop_path)

    def init_weights(self, scheme=''):
        """Initialize attention and conv sub-modules with the named scheme."""
        named_apply(partial(_init_transformer, scheme=scheme), self.attn)
        named_apply(partial(_init_conv, scheme=scheme), self.conv)

    def forward(self, x):
        # conv in NCHW, attention in NHWC
        x = self.conv(x)
        x = x.permute(0, 2, 3, 1)
        x = self.attn(x)
        x = x.permute(0, 3, 1, 2)
        return x
class MaxxVitStage(nn.Module):
    """A stage of MaxxVit blocks.

    The first block applies `stride` (downsampling); remaining blocks keep
    resolution. Block types are selected per index from `block_types`:
      'C'  conv block (ConvNeXt or MBConv per conv_cfg.block_type)
      'T'  transformer block over the full 2D feature map
      'M'  MaxVit block (conv + window attn + grid attn)
      'PM' parallel MaxVit block (conv + parallel window/grid attn)
    """

    def __init__(
            self,
            in_chs: int,
            out_chs: int,
            stride: int = 2,
            depth: int = 4,
            feat_size: Tuple[int, int] = (14, 14),
            block_types: Union[str, Tuple[str]] = 'C',
            transformer_cfg: MaxxVitTransformerCfg = MaxxVitTransformerCfg(),
            conv_cfg: MaxxVitConvCfg = MaxxVitConvCfg(),
            drop_path: Union[float, List[float]] = 0.,
    ):
        super().__init__()
        self.grad_checkpointing = False

        block_types = extend_tuple(block_types, depth)
        if not isinstance(drop_path, (list, tuple)):
            # fix: the annotation allows a scalar but the loop below indexes
            # drop_path[i]; broadcast a scalar to a per-block list
            drop_path = [drop_path] * depth
        blocks = []
        for i, t in enumerate(block_types):
            block_stride = stride if i == 0 else 1  # only the first block downsamples
            assert t in ('C', 'T', 'M', 'PM')
            if t == 'C':
                conv_cls = ConvNeXtBlock if conv_cfg.block_type == 'convnext' else MbConvBlock
                blocks += [conv_cls(
                    in_chs,
                    out_chs,
                    stride=block_stride,
                    cfg=conv_cfg,
                    drop_path=drop_path[i],
                )]
            elif t == 'T':
                rel_pos_cls = get_rel_pos_cls(transformer_cfg, feat_size)
                blocks += [TransformerBlock2d(
                    in_chs,
                    out_chs,
                    stride=block_stride,
                    rel_pos_cls=rel_pos_cls,
                    cfg=transformer_cfg,
                    drop_path=drop_path[i],
                )]
            elif t == 'M':
                blocks += [MaxxVitBlock(
                    in_chs,
                    out_chs,
                    stride=block_stride,
                    conv_cfg=conv_cfg,
                    transformer_cfg=transformer_cfg,
                    drop_path=drop_path[i],
                )]
            elif t == 'PM':
                blocks += [ParallelMaxxVitBlock(
                    in_chs,
                    out_chs,
                    stride=block_stride,
                    conv_cfg=conv_cfg,
                    transformer_cfg=transformer_cfg,
                    drop_path=drop_path[i],
                )]
            in_chs = out_chs
        self.blocks = nn.Sequential(*blocks)

    def forward(self, x):
        if self.grad_checkpointing and not torch.jit.is_scripting():
            # trade compute for memory during training
            x = checkpoint_seq(self.blocks, x)
        else:
            x = self.blocks(x)
        return x
class Stem(nn.Module):
    """Convolutional stem: stride-2 conv + norm-act, then stride-1 conv.

    Overall stride is 2. `out_chs` may be an int (duplicated for both convs)
    or a 2-tuple of per-conv widths.
    """

    def __init__(
            self,
            in_chs: int,
            out_chs: int,
            kernel_size: int = 3,
            act_layer: str = 'gelu',
            norm_layer: str = 'batchnorm2d',
            norm_eps: float = 1e-5,
    ):
        super().__init__()
        if not isinstance(out_chs, (list, tuple)):
            out_chs = to_2tuple(out_chs)

        norm_act_layer = partial(get_norm_act_layer(norm_layer, act_layer), eps=norm_eps)
        self.out_chs = out_chs[-1]
        self.stride = 2

        self.conv1 = create_conv2d(in_chs, out_chs[0], kernel_size, stride=2)
        self.norm1 = norm_act_layer(out_chs[0])
        self.conv2 = create_conv2d(out_chs[0], out_chs[1], kernel_size, stride=1)

    def init_weights(self, scheme=''):
        """Apply the named conv init scheme to all child modules."""
        named_apply(partial(_init_conv, scheme=scheme), self)

    def forward(self, x):
        x = self.conv1(x)
        x = self.norm1(x)
        x = self.conv2(x)
        return x
def cfg_window_size(cfg: MaxxVitTransformerCfg, img_size: Tuple[int, int]):
    """Return a transformer cfg with window/grid size resolved for img_size.

    If the cfg already specifies a window size it is returned unchanged
    (a grid size must then also be present). Otherwise both sizes are derived
    as img_size // partition_ratio.
    """
    if cfg.window_size is not None:
        assert cfg.grid_size
        return cfg
    partition_size = tuple(d // cfg.partition_ratio for d in img_size)
    return replace(cfg, window_size=partition_size, grid_size=partition_size)
class MaxxVit(nn.Module):
""" CoaTNet + MaxVit base model.
Highly configurable for different block compositions, tensor layouts, pooling types.
"""
    def __init__(
            self,
            cfg: MaxxVitCfg,
            img_size: Union[int, Tuple[int, int]] = 224,
            in_chans: int = 3,
            num_classes: int = 1000,
            global_pool: str = 'avg',
            drop_rate: float = 0.,
            drop_path_rate: float = 0.
    ):
        """Build stem, stages, and final norm from the given MaxxVitCfg.

        Args:
            cfg: Model architecture configuration.
            img_size: Input resolution used to derive window/grid sizes.
            in_chans: Number of input image channels.
            num_classes: Classifier output size.
            global_pool: Pooling type for the classifier head.
            drop_rate: Head dropout rate.
            drop_path_rate: Max stochastic-depth rate (linearly ramped per block).
        """
        super().__init__()
        img_size = to_2tuple(img_size)
        # resolve window/grid partition size from img_size when not set in cfg
        transformer_cfg = cfg_window_size(cfg.transformer_cfg, img_size)
        self.num_classes = num_classes
        self.global_pool = global_pool
        self.num_features = cfg.embed_dim[-1]
        self.embed_dim = cfg.embed_dim
        self.drop_rate = drop_rate
        self.grad_checkpointing = False

        self.stem = Stem(
            in_chs=in_chans,
            out_chs=cfg.stem_width,
            act_layer=cfg.conv_cfg.act_layer,
            norm_layer=cfg.conv_cfg.norm_layer,
            norm_eps=cfg.conv_cfg.norm_eps,
        )

        stride = self.stem.stride
        feat_size = tuple([i // s for i, s in zip(img_size, to_2tuple(stride))])

        num_stages = len(cfg.embed_dim)
        assert len(cfg.depths) == num_stages
        # per-block stochastic-depth rates, linearly increasing, split per stage
        dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg.depths)).split(cfg.depths)]
        in_chs = self.stem.out_chs
        #final_norm_layer = get_norm_layer(cfg.transformer_cfg.norm_layer)
        stages = []
        #norms = []
        for i in range(num_stages):
            stage_stride = 2  # every stage halves the feature map
            out_chs = cfg.embed_dim[i]
            # ceil-divide the running feature size by the stage stride
            feat_size = tuple([(r - 1) // stage_stride + 1 for r in feat_size])
            stages += [MaxxVitStage(
                in_chs,
                out_chs,
                depth=cfg.depths[i],
                block_types=cfg.block_type[i],
                conv_cfg=cfg.conv_cfg,
                transformer_cfg=transformer_cfg,
                feat_size=feat_size,
                drop_path=dpr[i],
            )]
            #norms.append(final_norm_layer(out_chs, eps=cfg.transformer_cfg.norm_eps))
            stride *= stage_stride
            in_chs = out_chs
        self.stages = nn.Sequential(*stages)
        #self.norms = nn.Sequential(*norms)

        final_norm_layer = get_norm_layer(cfg.transformer_cfg.norm_layer)
        self.norm = final_norm_layer(self.num_features, eps=cfg.transformer_cfg.norm_eps)

        # Classifier head
        #self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate)

        # Weight init (default PyTorch init works well for AdamW if scheme not set)
        assert cfg.weight_init in ('', 'normal', 'trunc_normal', 'xavier_normal', 'vit_eff')
        if cfg.weight_init:
            named_apply(partial(self._init_weights, scheme=cfg.weight_init), self)
def _init_weights(self, module, name, scheme=''):
if hasattr(module, 'init_weights'):
try:
module.init_weights(scheme=scheme)
except TypeError:
module.init_weights()
@torch.jit.ignore
def no_weight_decay(self):
return {
k for k, _ in self.named_parameters()
if any(n in k for n in ["relative_position_bias_table", "rel_pos.mlp"])}
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^stem', # stem and embed
blocks=[(r'^stages\.(\d+)', None), (r'^norm', (99999,))]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
    @torch.jit.ignore
    def get_classifier(self):
        # NOTE(review): self.head is not assigned in the visible __init__ (the
        # ClassifierHead line there is commented out) — presumably it is set in
        # reset_classifier; confirm before relying on this accessor at construction time.
        return self.head.fc
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
if global_pool is None:
global_pool = self.head.global_pool.pool_type | self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate) | 4 | 2023-10-24 17:49:10+00:00 | 12k |
StackTipsLab/bloggy | bloggy/urls.py | [
{
"identifier": "settings",
"path": "bloggy/settings.py",
"snippet": "BASE_DIR = Path(__file__).resolve().parent.parent\nSECRET_KEY = os.getenv(\"SECRET_KEY\", get_random_secret_key())\nDEBUG = os.getenv(\"DEBUG\", \"False\") == \"True\"\nALLOWED_HOSTS = os.getenv(\"ALLOWED_HOSTS\", \"127.0.0.1, localho... | from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import LogoutView
from django.contrib.auth.views import PasswordChangeView
from django.contrib.sitemaps.views import sitemap, index
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.urls import path, include
from django.views.generic.base import TemplateView
from bloggy import settings
from bloggy.views import EditProfileView
from bloggy.views.courses_view import CoursesListView, CourseDetailsView, LessonDetailsView
from bloggy.views.pages import IndexView
from bloggy.views.category_view import CategoriesView, CategoryDetailsView
from .services.sitemaps import sitemaps_list
from .views import RegisterView
from .views.account import AccountActivationView
from .views.posts import PostListView, PostDetailsView
from .views.login import MyLoginView
from .views.pages import AdsTextView, robots
from .views.pages import PageDetailsView
from .views.quizzes_view import QuizListView, QuizDetailView
from .views.rss import PostsRssFeed, CoursesRssFeed
from .views.search import SearchListView
from .views.user import MyProfileView, PublicProfileView, AuthorsListView
from .views.user_collections import UserBookmarksView | 8,298 | """bloggy URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
urlpatterns = [
path('admin/', admin.site.urls),
path('admin/password_change/', PasswordChangeView.as_view(), name='password_change'),
path('', IndexView.as_view(), name='index'),
path('articles', PostListView.as_view(), name='posts'),
path('articles/<slug:slug>', PostDetailsView.as_view(), name='post_single'),
path('topics', CategoriesView.as_view(), name='categories'),
path('topics/<str:slug>', CategoryDetailsView.as_view(), name='categories_single'),
path('search', SearchListView.as_view(), name='search'),
path('courses', CoursesListView.as_view(), name='courses'),
path('courses/<slug:slug>', CourseDetailsView.as_view(), name='courses_single'),
path('courses/<str:course>/<slug:slug>', LessonDetailsView.as_view(), name='lesson_single'),
path('quizzes', QuizListView.as_view(), name='quizzes'),
path('quizzes/<slug:slug>', QuizDetailView.as_view(), name='quiz_single'),
path('login', MyLoginView.as_view(template_name="auth/login.html"), name='login'),
path('logout', LogoutView.as_view(), name='logout'),
path('register', RegisterView.as_view(), name='register'),
path('activate/<str:uuid>/<str:token>', AccountActivationView.as_view(), name='activate_account'),
path('authors', AuthorsListView.as_view(), name="authors"),
path('user/<str:username>', PublicProfileView.as_view(), name="user_profile"),
path('edit-profile', login_required(EditProfileView.as_view()), name="profile.edit_profile"),
# path('dashboard', login_required(MyProfileView.as_view()), name="profile.dashboard"),
path('bookmarks', login_required(UserBookmarksView.as_view()), name="profile.bookmarks"),
path('contact', TemplateView.as_view(template_name="pages/contact.html"), name='pages.contact'),
path("rss/articles", PostsRssFeed(), name="articles_feed"),
path("rss/courses", CoursesRssFeed(), name="courses_feed"),
path('sitemap.xml', index, {'sitemaps': sitemaps_list}, name='django.contrib.sitemaps.views.index'),
path('sitemap/<str:section>.xml', sitemap, {'sitemaps': sitemaps_list},
name='django.contrib.sitemaps.views.sitemap'),
# static files for SEO or other reasons
| """bloggy URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
urlpatterns = [
path('admin/', admin.site.urls),
path('admin/password_change/', PasswordChangeView.as_view(), name='password_change'),
path('', IndexView.as_view(), name='index'),
path('articles', PostListView.as_view(), name='posts'),
path('articles/<slug:slug>', PostDetailsView.as_view(), name='post_single'),
path('topics', CategoriesView.as_view(), name='categories'),
path('topics/<str:slug>', CategoryDetailsView.as_view(), name='categories_single'),
path('search', SearchListView.as_view(), name='search'),
path('courses', CoursesListView.as_view(), name='courses'),
path('courses/<slug:slug>', CourseDetailsView.as_view(), name='courses_single'),
path('courses/<str:course>/<slug:slug>', LessonDetailsView.as_view(), name='lesson_single'),
path('quizzes', QuizListView.as_view(), name='quizzes'),
path('quizzes/<slug:slug>', QuizDetailView.as_view(), name='quiz_single'),
path('login', MyLoginView.as_view(template_name="auth/login.html"), name='login'),
path('logout', LogoutView.as_view(), name='logout'),
path('register', RegisterView.as_view(), name='register'),
path('activate/<str:uuid>/<str:token>', AccountActivationView.as_view(), name='activate_account'),
path('authors', AuthorsListView.as_view(), name="authors"),
path('user/<str:username>', PublicProfileView.as_view(), name="user_profile"),
path('edit-profile', login_required(EditProfileView.as_view()), name="profile.edit_profile"),
# path('dashboard', login_required(MyProfileView.as_view()), name="profile.dashboard"),
path('bookmarks', login_required(UserBookmarksView.as_view()), name="profile.bookmarks"),
path('contact', TemplateView.as_view(template_name="pages/contact.html"), name='pages.contact'),
path("rss/articles", PostsRssFeed(), name="articles_feed"),
path("rss/courses", CoursesRssFeed(), name="courses_feed"),
path('sitemap.xml', index, {'sitemaps': sitemaps_list}, name='django.contrib.sitemaps.views.index'),
path('sitemap/<str:section>.xml', sitemap, {'sitemaps': sitemaps_list},
name='django.contrib.sitemaps.views.sitemap'),
# static files for SEO or other reasons | path('robots.txt', robots, name='robots'), | 15 | 2023-10-17 14:50:39+00:00 | 12k |
zabbix/python-zabbix-utils | .github/scripts/compatibility_api_test_5.py | [
{
"identifier": "Getter",
"path": "zabbix_utils/getter.py",
"snippet": "class Getter():\n \"\"\"Zabbix get implementation.\n\n Args:\n host (str, optional): Zabbix agent address. Defaults to `'127.0.0.1'`.\n\n port (int, optional): Zabbix agent port. Defaults to `10050`.\n\n t... | import sys
import time
import unittest
from zabbix_utils.getter import Getter
from zabbix_utils.api import ZabbixAPI, APIVersion
from zabbix_utils.sender import ItemValue, Sender, TrapperResponse
from zabbix_utils.exceptions import APIRequestError, APINotSupported | 9,353 | password=self.password
)
self.assertIsNotNone(self.zapi._ZabbixAPI__session_id, "Login by user and password was going wrong")
resp = self.zapi.user.checkAuthentication(sessionid=self.zapi._ZabbixAPI__session_id)
self.assertEqual(
type(resp), dict, "Request user.checkAuthentication was going wrong")
users = self.zapi.user.get(
output=['userid', 'name']
)
self.assertEqual(type(users), list, "Request user.get was going wrong")
self.zapi.logout()
self.assertIsNone(self.zapi._ZabbixAPI__session_id, "Logout was going wrong")
with self.assertRaises(APIRequestError,
msg="Request user.checkAuthentication after logout was going wrong"):
resp = self.zapi.user.checkAuthentication(sessionid=self.zapi._ZabbixAPI__session_id)
def test_token_auth(self):
"""Tests auth using token"""
with self.assertRaises(APINotSupported,
msg="Login by token should be not supported"):
self.zapi.login(token=self.token)
class CompatibilitySenderTest(unittest.TestCase):
"""Compatibility test with Zabbix sender version 5.0"""
def setUp(self):
self.ip = '127.0.0.1'
self.port = 10051
self.chunk_size = 10
self.sender = Sender(
server=self.ip,
port=self.port,
chunk_size=self.chunk_size
)
self.hostname = f"{self.__class__.__name__}_host"
self.itemname = f"{self.__class__.__name__}_item"
self.itemkey = f"{self.__class__.__name__}"
self.prepare_items()
def prepare_items(self):
"""Creates host and items for sending values later"""
zapi = ZabbixAPI(
url=ZABBIX_URL,
user=ZABBIX_USER,
password=ZABBIX_PASSWORD,
skip_version_check=True
)
hosts = zapi.host.get(
filter={'host': self.hostname},
output=['hostid']
)
hostid = None
if len(hosts) > 0:
hostid = hosts[0].get('hostid')
if not hostid:
hostid = zapi.host.create(
host=self.hostname,
interfaces=[{
"type": 1,
"main": 1,
"useip": 1,
"ip": "127.0.0.1",
"dns": "",
"port": "10050"
}],
groups=[{"groupid": "2"}]
)['hostids'][0]
self.assertIsNotNone(hostid, "Creating test host was going wrong")
items = zapi.item.get(
filter={'key_': self.itemkey},
output=['itemid']
)
itemid = None
if len(items) > 0:
itemid = items[0].get('itemid')
if not itemid:
itemid = zapi.item.create(
name=self.itemname,
key_=self.itemkey,
hostid=hostid,
type=2,
value_type=3
)['itemids'][0]
time.sleep(2)
self.assertIsNotNone(hostid, "Creating test item was going wrong")
zapi.logout()
def test_send_values(self):
"""Tests sending item values"""
items = [
ItemValue(self.hostname, self.itemkey, 10),
ItemValue(self.hostname, self.itemkey, 'test message'),
ItemValue(self.hostname, 'item_key1', -1, 1695713666),
ItemValue(self.hostname, 'item_key2', '{"msg":"test message"}'),
ItemValue(self.hostname, self.itemkey, 0, 1695713666, 100),
ItemValue(self.hostname, self.itemkey, 5.5, 1695713666)
]
resp = list(self.sender.send(items).values())[0]
| #!/usr/bin/env python
# Copyright (C) 2001-2023 Zabbix SIA
#
# Zabbix SIA licenses this file under the MIT License.
# See the LICENSE file in the project root for more information.
sys.path.append('.')
ZABBIX_URL = 'localhost'
ZABBIX_USER = 'Admin'
ZABBIX_PASSWORD = 'zabbix'
class CompatibilityAPITest(unittest.TestCase):
"""Compatibility test with Zabbix API version 5.0"""
def setUp(self):
self.url = 'localhost'
self.user = 'Admin'
self.password = 'zabbix'
self.token = 'token'
self.zapi = ZabbixAPI(
url=self.url
)
def test_classic_auth(self):
"""Tests classic auth using username and password"""
self.assertEqual(
type(self.zapi), ZabbixAPI, "Creating ZabbixAPI object was going wrong")
self.assertEqual(
type(self.zapi.api_version()), APIVersion, "Version getting was going wrong")
self.zapi.login(
user=self.user,
password=self.password
)
self.assertIsNotNone(self.zapi._ZabbixAPI__session_id, "Login by user and password was going wrong")
resp = self.zapi.user.checkAuthentication(sessionid=self.zapi._ZabbixAPI__session_id)
self.assertEqual(
type(resp), dict, "Request user.checkAuthentication was going wrong")
users = self.zapi.user.get(
output=['userid', 'name']
)
self.assertEqual(type(users), list, "Request user.get was going wrong")
self.zapi.logout()
self.assertIsNone(self.zapi._ZabbixAPI__session_id, "Logout was going wrong")
with self.assertRaises(APIRequestError,
msg="Request user.checkAuthentication after logout was going wrong"):
resp = self.zapi.user.checkAuthentication(sessionid=self.zapi._ZabbixAPI__session_id)
def test_token_auth(self):
"""Tests auth using token"""
with self.assertRaises(APINotSupported,
msg="Login by token should be not supported"):
self.zapi.login(token=self.token)
class CompatibilitySenderTest(unittest.TestCase):
"""Compatibility test with Zabbix sender version 5.0"""
    def setUp(self):
        # Trapper endpoint and chunking configuration for the sender.
        self.ip = '127.0.0.1'
        self.port = 10051
        self.chunk_size = 10
        self.sender = Sender(
            server=self.ip,
            port=self.port,
            chunk_size=self.chunk_size
        )
        # Derive unique host/item identifiers from the test class name so
        # fixtures from different test classes cannot collide.
        self.hostname = f"{self.__class__.__name__}_host"
        self.itemname = f"{self.__class__.__name__}_item"
        self.itemkey = f"{self.__class__.__name__}"
        # Ensure the host and its trapper item exist before values are sent.
        self.prepare_items()
def prepare_items(self):
"""Creates host and items for sending values later"""
zapi = ZabbixAPI(
url=ZABBIX_URL,
user=ZABBIX_USER,
password=ZABBIX_PASSWORD,
skip_version_check=True
)
hosts = zapi.host.get(
filter={'host': self.hostname},
output=['hostid']
)
hostid = None
if len(hosts) > 0:
hostid = hosts[0].get('hostid')
if not hostid:
hostid = zapi.host.create(
host=self.hostname,
interfaces=[{
"type": 1,
"main": 1,
"useip": 1,
"ip": "127.0.0.1",
"dns": "",
"port": "10050"
}],
groups=[{"groupid": "2"}]
)['hostids'][0]
self.assertIsNotNone(hostid, "Creating test host was going wrong")
items = zapi.item.get(
filter={'key_': self.itemkey},
output=['itemid']
)
itemid = None
if len(items) > 0:
itemid = items[0].get('itemid')
if not itemid:
itemid = zapi.item.create(
name=self.itemname,
key_=self.itemkey,
hostid=hostid,
type=2,
value_type=3
)['itemids'][0]
time.sleep(2)
self.assertIsNotNone(hostid, "Creating test item was going wrong")
zapi.logout()
def test_send_values(self):
"""Tests sending item values"""
items = [
ItemValue(self.hostname, self.itemkey, 10),
ItemValue(self.hostname, self.itemkey, 'test message'),
ItemValue(self.hostname, 'item_key1', -1, 1695713666),
ItemValue(self.hostname, 'item_key2', '{"msg":"test message"}'),
ItemValue(self.hostname, self.itemkey, 0, 1695713666, 100),
ItemValue(self.hostname, self.itemkey, 5.5, 1695713666)
]
resp = list(self.sender.send(items).values())[0]
| self.assertEqual(type(resp), TrapperResponse, "Sending item values was going wrong") | 5 | 2023-10-16 12:49:35+00:00 | 12k |
YefanZhou/TempBalance | main_tb.py | [
{
"identifier": "Tempbalance",
"path": "tempbalance.py",
"snippet": "class Tempbalance(object):\n def __init__(self, \n net, \n EVALS_THRESH=0.00001,\n bins=100, \n conv_norm=0.5,\n pl_fitting='median',\n ... | import os
import sys
import time
import argparse
import random
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import numpy as np
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import config as cf
import torch_optimizer
from pathlib import Path
from os.path import join
from tempbalance import Tempbalance
from sgdsnr import SGDSNR
from adamp import SGDP, AdamP
from lars_optim import LARS, LAMB
from utils import train, test, getNetwork, save_args_to_file | 10,120 |
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(cf.mean[args.dataset], cf.std[args.dataset]),
])
data_path = join(args.datadir, args.dataset)
if(args.dataset == 'cifar10'):
print("| Preparing CIFAR-10 dataset...")
sys.stdout.write("| ")
trainset = torchvision.datasets.CIFAR10(root=data_path, train=True,
download=True,
transform=transform_train)
testset = torchvision.datasets.CIFAR10(root=data_path, train=False,
download=False,
transform=transform_test)
num_classes = 10
elif(args.dataset == 'cifar100'):
print("| Preparing CIFAR-100 dataset...")
sys.stdout.write("| ")
trainset = torchvision.datasets.CIFAR100(root=data_path, train=True,
download=True,
transform=transform_train)
testset = torchvision.datasets.CIFAR100(root=data_path, train=False,
download=False,
transform=transform_test)
num_classes = 100
elif(args.dataset == 'svhn'):
print("| Preparing SVHN dataset...")
sys.stdout.write("| ")
trainset = torchvision.datasets.SVHN(root=data_path,
split='train',
download=True,
transform=transform_train)
testset = torchvision.datasets.SVHN(root=data_path,
split='test',
download=True,
transform=transform_test)
num_classes = 10
elif(args.dataset == 'tiny-imagenet-200'):
print("| Preparing tiny-imagenet-200 dataset...")
sys.stdout.write("| ")
trainset = datasets.ImageFolder(os.path.join(data_path, 'train'), transform_train)
testset = datasets.ImageFolder(os.path.join(data_path, 'val'), transform_test)
num_classes = 200
else:
raise NotImplementedError
trainloader = torch.utils.data.DataLoader(trainset,
batch_size=args.batch_size,
shuffle=True,
num_workers=6)
testloader = torch.utils.data.DataLoader(testset,
batch_size=cf.eval_batchsize[args.dataset],
shuffle=False,
num_workers=4)
Path(args.ckpt_path).mkdir(parents=True, exist_ok=True)
if args.print_tofile:
# Open files for stdout and stderr redirection
stdout_file = open(os.path.join(args.ckpt_path, 'stdout.log'), 'w')
stderr_file = open(os.path.join(args.ckpt_path, 'stderr.log'), 'w')
# Redirect stdout and stderr to the files
sys.stdout = stdout_file
sys.stderr = stderr_file
# Model
print('\n[Phase 2] : Model setup')
if args.resume:
# Load checkpoint
print('| Resuming from checkpoint...')
net, file_name = getNetwork(args, num_classes)
checkpoint = torch.load(args.resume, map_location='cpu')
net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))
net.load_state_dict(checkpoint['net'])
best_acc = checkpoint['test_acc']
start_epoch = checkpoint['epoch']
print(f"Loaded Epoch: {start_epoch} \n Test Acc: {best_acc:.3f} Train Acc: {checkpoint['train_acc']:.3f}")
else:
print('| Building net type [' + args.net_type + ']...')
net, file_name = getNetwork(args, num_classes)
net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))
best_acc = 0
if use_cuda:
net.cuda()
cudnn.benchmark = True
criterion = nn.CrossEntropyLoss()
print(net)
if args.use_tb:
print("##############Enable and init Temp Balancing##################")
tb_scheduler = Tempbalance(net=net,
pl_fitting=args.pl_fitting,
xmin_pos=args.xmin_pos,
filter_zeros=args.filter_zeros,
remove_first_layer=args.remove_first_layer,
remove_last_layer=args.remove_last_layer,
esd_metric_for_tb=args.esd_metric_for_tb,
assign_func=args.assign_func,
lr_min_ratio=args.lr_min_ratio,
lr_max_ratio=args.lr_max_ratio,
batchnorm=args.batchnorm,
batchnorm_type=args.batchnorm_type
)
tb_param_group, _ = \
tb_scheduler.build_optimizer_param_group(untuned_lr=args.lr, initialize=True)
if args.optim_type == 'SGD':
optimizer = optim.SGD(tb_param_group,
momentum=0.9,
weight_decay=args.weight_decay)
elif args.optim_type == 'SGDSNR':
| from __future__ import print_function
parser = argparse.ArgumentParser(description='PyTorch CIFAR-10 Training')
parser.add_argument('--lr', type=float, default=0.01, help='learning_rate')
parser.add_argument('--net-type', type=str, default='wide-resnet', help='model')
parser.add_argument('--depth', type=int, default=28, help='depth of model')
parser.add_argument('--num-epochs', type=int, default=200, help='number of epochs')
parser.add_argument('--widen-factor', type=float, default=1, help='width of model')
parser.add_argument('--dataset', type=str, default='cifar10', help='dataset = [cifar10/cifar100]')
parser.add_argument('--lr-sche', type=str, default='cosine', choices=['cosine'])
parser.add_argument('--weight-decay', type=float, default=1e-4) # 5e-4
parser.add_argument('--ckpt-path', type=str, default='', help='path to checkpoints')
parser.add_argument('--print-tofile', default=False, type=lambda x: (str(x).lower() == 'true'), help='print to file')
parser.add_argument('--batch-size', type=int, default=128) # 5e-4
parser.add_argument('--datadir', type=str, default='', help='directory of dataset')
parser.add_argument('--optim-type', type=str, default='SGD', help='type of optimizer')
parser.add_argument('--resume', type=str, default='', help='resume from checkpoint')
parser.add_argument('--seed', type=int, default=42)
parser.add_argument('--ww-interval', type=int, default=1)
parser.add_argument('--epochs-to-save', type=int, nargs='+', default=[])
parser.add_argument('--pl-fitting', type=str, default='median', choices=['median', 'goodness-of-fit', 'fix-finger'])
# temperature balance related
parser.add_argument('--use-tb', default=True, type=lambda x: (str(x).lower() == 'true'), help='use temp balance')
parser.add_argument('--remove-last-layer', default=True, type=lambda x: (str(x).lower() == 'true'), help='if remove the last layer')
parser.add_argument('--remove-first-layer', default=True, type=lambda x: (str(x).lower() == 'true'), help='if remove the first layer')
parser.add_argument('--batchnorm', default=True, type=lambda x: (str(x).lower() == 'true'), help='balancing batch norm layer')
parser.add_argument('--filter-zeros', default=False, type=lambda x: (str(x).lower() == 'true') )
parser.add_argument('--esd-metric-for-tb', type=str, default='alpha', help='ww metric')
parser.add_argument('--assign-func', type=str, default='', help='assignment function for layerwise lr')
parser.add_argument('--lr-min-ratio', type=float, default=0.5)
parser.add_argument('--lr-max-ratio', type=float, default=1.5)
parser.add_argument('--xmin-pos', type=float, default=2, help='xmin_index = size of eigs // xmin_pos')
parser.add_argument('--batchnorm-type', type=str, default='name', help='method to change batchnorm layer learning rate')
parser.add_argument('--look-k', type=int, default=5, help='')
parser.add_argument('--look-alpha', type=float, default=0.8, help='')
parser.add_argument('--T_0', type=int, default=10, help='')
parser.add_argument('--T-mult', type=int, default=2, help='')
# spectral regularization related
parser.add_argument('--sg', type=float, default=0.01, help='spectrum regularization')
args = parser.parse_args()
print(args)
# Save the arguments to a file
save_args_to_file(args, join(args.ckpt_path, 'args.json'))
def set_seed(seed: int = 42) -> None:
    """Seed every RNG (Python, NumPy, PyTorch CPU and CUDA) for reproducibility.

    Args:
        seed: value used for all random number generators.
    """
    print(f"=====> Set the random seed as {seed}")
    np.random.seed(seed)
    random.seed(seed)
    torch.manual_seed(seed)
    # Seed every visible GPU, not only the current device (no-op on CPU-only).
    torch.cuda.manual_seed_all(seed)
    # When running on the CuDNN backend, two further options must be set
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    # Set a fixed value for the hash seed.
    # NOTE(review): this only affects child processes -- the hash seed of the
    # already-running interpreter is fixed at startup.
    os.environ["PYTHONHASHSEED"] = str(seed)
# Hyper Parameter settings
use_cuda = torch.cuda.is_available()
best_acc = 0
start_epoch = cf.start_epoch
set_seed(args.seed)
# Data Loader
print('\n[Phase 1] : Data Preparation')
print(f"prepare preprocessing, {args.dataset}")
transform_train = transforms.Compose([
transforms.RandomCrop(cf.crop_size[args.dataset], padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(cf.mean[args.dataset], cf.std[args.dataset]),
]) # meanstd transformation
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(cf.mean[args.dataset], cf.std[args.dataset]),
])
data_path = join(args.datadir, args.dataset)
if(args.dataset == 'cifar10'):
print("| Preparing CIFAR-10 dataset...")
sys.stdout.write("| ")
trainset = torchvision.datasets.CIFAR10(root=data_path, train=True,
download=True,
transform=transform_train)
testset = torchvision.datasets.CIFAR10(root=data_path, train=False,
download=False,
transform=transform_test)
num_classes = 10
elif(args.dataset == 'cifar100'):
print("| Preparing CIFAR-100 dataset...")
sys.stdout.write("| ")
trainset = torchvision.datasets.CIFAR100(root=data_path, train=True,
download=True,
transform=transform_train)
testset = torchvision.datasets.CIFAR100(root=data_path, train=False,
download=False,
transform=transform_test)
num_classes = 100
elif(args.dataset == 'svhn'):
print("| Preparing SVHN dataset...")
sys.stdout.write("| ")
trainset = torchvision.datasets.SVHN(root=data_path,
split='train',
download=True,
transform=transform_train)
testset = torchvision.datasets.SVHN(root=data_path,
split='test',
download=True,
transform=transform_test)
num_classes = 10
elif(args.dataset == 'tiny-imagenet-200'):
print("| Preparing tiny-imagenet-200 dataset...")
sys.stdout.write("| ")
trainset = datasets.ImageFolder(os.path.join(data_path, 'train'), transform_train)
testset = datasets.ImageFolder(os.path.join(data_path, 'val'), transform_test)
num_classes = 200
else:
raise NotImplementedError
trainloader = torch.utils.data.DataLoader(trainset,
batch_size=args.batch_size,
shuffle=True,
num_workers=6)
testloader = torch.utils.data.DataLoader(testset,
batch_size=cf.eval_batchsize[args.dataset],
shuffle=False,
num_workers=4)
Path(args.ckpt_path).mkdir(parents=True, exist_ok=True)
if args.print_tofile:
# Open files for stdout and stderr redirection
stdout_file = open(os.path.join(args.ckpt_path, 'stdout.log'), 'w')
stderr_file = open(os.path.join(args.ckpt_path, 'stderr.log'), 'w')
# Redirect stdout and stderr to the files
sys.stdout = stdout_file
sys.stderr = stderr_file
# Model
print('\n[Phase 2] : Model setup')
if args.resume:
# Load checkpoint
print('| Resuming from checkpoint...')
net, file_name = getNetwork(args, num_classes)
checkpoint = torch.load(args.resume, map_location='cpu')
net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))
net.load_state_dict(checkpoint['net'])
best_acc = checkpoint['test_acc']
start_epoch = checkpoint['epoch']
print(f"Loaded Epoch: {start_epoch} \n Test Acc: {best_acc:.3f} Train Acc: {checkpoint['train_acc']:.3f}")
else:
print('| Building net type [' + args.net_type + ']...')
net, file_name = getNetwork(args, num_classes)
net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))
best_acc = 0
if use_cuda:
net.cuda()
cudnn.benchmark = True
criterion = nn.CrossEntropyLoss()
print(net)
if args.use_tb:
print("##############Enable and init Temp Balancing##################")
tb_scheduler = Tempbalance(net=net,
pl_fitting=args.pl_fitting,
xmin_pos=args.xmin_pos,
filter_zeros=args.filter_zeros,
remove_first_layer=args.remove_first_layer,
remove_last_layer=args.remove_last_layer,
esd_metric_for_tb=args.esd_metric_for_tb,
assign_func=args.assign_func,
lr_min_ratio=args.lr_min_ratio,
lr_max_ratio=args.lr_max_ratio,
batchnorm=args.batchnorm,
batchnorm_type=args.batchnorm_type
)
tb_param_group, _ = \
tb_scheduler.build_optimizer_param_group(untuned_lr=args.lr, initialize=True)
if args.optim_type == 'SGD':
optimizer = optim.SGD(tb_param_group,
momentum=0.9,
weight_decay=args.weight_decay)
elif args.optim_type == 'SGDSNR': | optimizer = SGDSNR(tb_param_group, | 1 | 2023-10-24 00:45:55+00:00 | 12k |
zhaojw1998/AccoMontage-3 | piano_arranger/models/Poly_Dis.py | [
{
"identifier": "PytorchModel",
"path": "piano_arranger/models/amc_dl/torch_plus/module.py",
"snippet": "class PytorchModel(nn.Module):\n\n def __init__(self, name, device):\n self.name = name\n super(PytorchModel, self).__init__()\n if device is None:\n device = torch... | from .amc_dl.torch_plus import PytorchModel
from .amc_dl.torch_plus.train_utils import get_zs_from_dists, kl_with_normal
from torch import nn
from torch.distributions import Normal
from .ptvae import RnnEncoder, RnnDecoder, PtvaeDecoder, TextureEncoder
import torch
import numpy as np | 9,526 | return est_x
    def inference(self, pr_mat, c, sample):
        """Full no-grad generation: encode chord `c` and texture `pr_mat`,
        concatenate the two latents and decode a numpy note estimate.

        `sample` is forwarded to get_zs_from_dists and selects sampled vs.
        deterministic latents (presumably posterior means -- verify helper).
        """
        self.eval()
        with torch.no_grad():
            dist_chd = self.chd_encoder(c)
            dist_rhy = self.rhy_encoder(pr_mat)
            z_chd, z_rhy = get_zs_from_dists([dist_chd, dist_rhy], sample)
            # The note decoder consumes the concatenated (chord, rhythm) code.
            dec_z = torch.cat([z_chd, z_rhy], dim=-1)
            pitch_outs, dur_outs = self.decoder(dec_z, True, None,
                                                None, 0., 0.)
            est_x, _, _ = self.decoder.output_to_numpy(pitch_outs, dur_outs)
        return est_x
def swap(self, pr_mat1, pr_mat2, c1, c2, fix_rhy, fix_chd):
pr_mat = pr_mat1 if fix_rhy else pr_mat2
c = c1 if fix_chd else c2
est_x = self.inference(pr_mat, c, sample=False)
return est_x
    def posterior_sample(self, pr_mat, c, scale=None, sample_chd=True,
                         sample_txt=True):
        """Reconstruct (pr_mat, c) by sampling from the posterior latents.

        scale: if not None, both posterior stds are multiplied by this
            factor before sampling (temperature-like control).
        sample_chd / sample_txt: when False, the corresponding latent is
            replaced by its posterior mean instead of a sample.
        """
        if scale is None and sample_chd and sample_txt:
            # Fast path: plain posterior sampling through the usual pipeline.
            est_x = self.inference(pr_mat, c, sample=True)
        else:
            dist_chd, dist_rhy = self.inference_encode(pr_mat, c)
            if scale is not None:
                mean_chd = dist_chd.mean
                mean_rhy = dist_rhy.mean
                # std_chd = torch.ones_like(dist_chd.mean) * scale
                # std_rhy = torch.ones_like(dist_rhy.mean) * scale
                # Rescale the posterior std by `scale`.
                std_chd = dist_chd.scale * scale
                std_rhy = dist_rhy.scale * scale
                dist_rhy = Normal(mean_rhy, std_rhy)
                dist_chd = Normal(mean_chd, std_chd)
            z_chd, z_rhy = get_zs_from_dists([dist_chd, dist_rhy], True)
            if not sample_chd:
                z_chd = dist_chd.mean
            if not sample_txt:
                z_rhy = dist_rhy.mean
            est_x = self.inference_decode(z_chd, z_rhy)
        return est_x
    def prior_sample(self, x, c, sample_chd=False, sample_rhy=False,
                     scale=1.):
        """Decode with the flagged latent factor(s) re-drawn from a zero-mean
        Normal prior with std `scale`; unflagged factors are sampled from
        their posteriors after encoding (x, c)."""
        dist_chd, dist_rhy = self.inference_encode(x, c)
        mean = torch.zeros_like(dist_rhy.mean)
        # NOTE(review): despite the name, `loc` is used as the std below.
        loc = torch.ones_like(dist_rhy.mean) * scale
        if sample_chd:
            dist_chd = Normal(mean, loc)
        if sample_rhy:
            dist_rhy = Normal(mean, loc)
        z_chd, z_rhy = get_zs_from_dists([dist_chd, dist_rhy], True)
        return self.inference_decode(z_chd, z_rhy)
def gt_sample(self, x):
out = x[:, :, 1:].numpy()
return out
    def interp(self, pr_mat1, c1, pr_mat2, c2, interp_chd=False,
               interp_rhy=False, int_count=10):
        """Interpolate between two (texture, chord) inputs in latent space.

        For each factor flagged for interpolation, `int_count` latents are
        taken along a spherical path between the two codes; an unflagged
        factor is held fixed at sample 1's code.
        Returns an array reshaped to (bs, int_count, 32, 15, -1).
        """
        dist_chd1, dist_rhy1 = self.inference_encode(pr_mat1, c1)
        dist_chd2, dist_rhy2 = self.inference_encode(pr_mat2, c2)
        # Deterministic latents (sample=False path of get_zs_from_dists).
        [z_chd1, z_rhy1, z_chd2, z_rhy2] = \
            get_zs_from_dists([dist_chd1, dist_rhy1, dist_chd2, dist_rhy2],
                              False)
        if interp_chd:
            z_chds = self.interp_z(z_chd1, z_chd2, int_count)
        else:
            z_chds = z_chd1.unsqueeze(1).repeat(1, int_count, 1)
        if interp_rhy:
            z_rhys = self.interp_z(z_rhy1, z_rhy2, int_count)
        else:
            z_rhys = z_rhy1.unsqueeze(1).repeat(1, int_count, 1)
        bs = z_chds.size(0)
        # Flatten (bs, int_count, d) -> (bs*int_count, d) for batched decoding.
        z_chds = z_chds.view(bs * int_count, -1).contiguous()
        z_rhys = z_rhys.view(bs * int_count, -1).contiguous()
        estxs = self.inference_decode(z_chds, z_rhys)
        return estxs.reshape((bs, int_count, 32, 15, -1))
def interp_z(self, z1, z2, int_count=10):
z1 = z1.numpy()
z2 = z2.numpy()
zs = torch.stack([self.interp_path(zz1, zz2, int_count)
for zz1, zz2 in zip(z1, z2)], dim=0)
return zs
    def interp_path(self, z1, z2, interpolation_count=10):
        """Spherically interpolate between two latent codes.

        Directions are slerp-interpolated between the normalized codes while
        the magnitude is interpolated log-linearly; returns a float tensor of
        shape (interpolation_count, *z1.shape) on self.device.
        """
        result_shape = z1.shape
        z1 = z1.reshape(-1)
        z2 = z2.reshape(-1)
        def slerp2(p0, p1, t):
            # Spherical linear interpolation between unit vectors p0 and p1,
            # evaluated at all fractions in vector t.
            omega = np.arccos(
                np.dot(p0 / np.linalg.norm(p0), p1 / np.linalg.norm(p1)))
            so = np.sin(omega)
            return np.sin((1.0 - t) * omega)[:, None] / so * p0[
                None] + np.sin(
                t * omega)[:, None] / so * p1[None]
        percentages = np.linspace(0.0, 1.0, interpolation_count)
        normalized_z1 = z1 / np.linalg.norm(z1)
        normalized_z2 = z2 / np.linalg.norm(z2)
        dirs = slerp2(normalized_z1, normalized_z2, percentages)
        # Interpolate the vector norm in log space to avoid shrinking toward
        # the origin mid-path.
        length = np.linspace(np.log(np.linalg.norm(z1)),
                             np.log(np.linalg.norm(z2)),
                             interpolation_count)
        out = (dirs * np.exp(length[:, None])).reshape(
            [interpolation_count] + list(result_shape))
        # out = np.array([(1 - t) * z1 + t * z2 for t in percentages])
        return torch.from_numpy(out).to(self.device).float()
@staticmethod
def init_model(device=None, chd_size=256, txt_size=256, num_channel=10):
name = 'disvae'
if device is None:
device = torch.device('cuda' if torch.cuda.is_available()
else 'cpu')
# chd_encoder = RnnEncoder(36, 1024, 256)
|
"""
Credit to Z. Wang et al., "Learning interpretable representation for controllable polyphonic music generation," ISMIR 2020.
https://github.com/ZZWaang/polyphonic-chord-texture-disentanglement
"""
class DisentangleVAE(PytorchModel):
    def __init__(self, name, device, chd_encoder, rhy_encoder, decoder,
                 chd_decoder):
        """Disentangled VAE with separate chord and rhythm (texture) encoders,
        a note decoder and an auxiliary chord decoder."""
        super(DisentangleVAE, self).__init__(name, device)
        self.chd_encoder = chd_encoder
        self.rhy_encoder = rhy_encoder
        self.decoder = decoder
        # Number of decoding time steps, taken from the note decoder.
        self.num_step = self.decoder.num_step
        self.chd_decoder = chd_decoder
def confuse_prmat(self, pr_mat):
non_zero_ent = torch.nonzero(pr_mat.long())
eps = torch.randint(0, 2, (non_zero_ent.size(0),))
eps = ((2 * eps) - 1).long()
confuse_ent = torch.clamp(non_zero_ent[:, 2] + eps, min=0, max=127)
pr_mat[non_zero_ent[:, 0], non_zero_ent[:, 1], confuse_ent] = \
pr_mat[non_zero_ent[:, 0], non_zero_ent[:, 1], non_zero_ent[:, 2]]
return pr_mat
def get_chroma(self, pr_mat):
bs = pr_mat.size(0)
pad = torch.zeros(bs, 32, 4).to(self.device)
pr_mat = torch.cat([pr_mat, pad], dim=-1)
c = pr_mat.view(bs, 32, -1, 12).contiguous()
c = c.sum(dim=-2) # (bs, 32, 12)
c = c.view(bs, 8, 4, 12)
c = c.sum(dim=-2).float()
c = torch.log(c + 1)
return c.to(self.device)
    def run(self, x, c, pr_mat, tfr1, tfr2, tfr3, confuse=True):
        """Training forward pass.

        x: teacher-forcing note target; c: chord condition; pr_mat: piano-roll
        texture input; tfr1/tfr2: teacher-forcing ratios for the note decoder;
        tfr3: teacher-forcing ratio for the chord decoder.
        NOTE(review): `confuse` is currently unused -- the confuse_prmat call
        below is commented out.
        Returns decoder outputs, both posteriors and the chord reconstruction
        triple (root, chroma, bass).
        """
        embedded_x, lengths = self.decoder.emb_x(x)
        # cc = self.get_chroma(pr_mat)
        dist_chd = self.chd_encoder(c)
        # pr_mat = self.confuse_prmat(pr_mat)
        dist_rhy = self.rhy_encoder(pr_mat)
        # Latents are sampled (sample=True) during training.
        z_chd, z_rhy = get_zs_from_dists([dist_chd, dist_rhy], True)
        dec_z = torch.cat([z_chd, z_rhy], dim=-1)
        pitch_outs, dur_outs = self.decoder(dec_z, False, embedded_x,
                                            lengths, tfr1, tfr2)
        # Auxiliary chord decoder reconstructs c from the chord latent only.
        recon_root, recon_chroma, recon_bass = self.chd_decoder(z_chd, False,
                                                                tfr3, c)
        return pitch_outs, dur_outs, dist_chd, dist_rhy, recon_root, \
               recon_chroma, recon_bass
    def loss_function(self, x, c, recon_pitch, recon_dur, dist_chd,
                      dist_rhy, recon_root, recon_chroma, recon_bass,
                      beta, weights, weighted_dur=False):
        """Combine note reconstruction, beta-weighted KL and chord losses.

        Returns the total followed by every individual term for logging:
        (loss, recon, pitch, dur, kl, kl_chd, kl_rhy, chord, root, chroma,
        bass).
        """
        recon_loss, pl, dl = self.decoder.recon_loss(x, recon_pitch, recon_dur,
                                                     weights, weighted_dur)
        kl_loss, kl_chd, kl_rhy = self.kl_loss(dist_chd, dist_rhy)
        chord_loss, root, chroma, bass = self.chord_loss(c, recon_root,
                                                         recon_chroma,
                                                         recon_bass)
        # beta weights the KL term (beta-VAE style annealing knob).
        loss = recon_loss + beta * kl_loss + chord_loss
        return loss, recon_loss, pl, dl, kl_loss, kl_chd, kl_rhy, chord_loss, \
               root, chroma, bass
def chord_loss(self, c, recon_root, recon_chroma, recon_bass):
loss_fun = nn.CrossEntropyLoss()
root = c[:, :, 0: 12].max(-1)[-1].view(-1).contiguous()
chroma = c[:, :, 12: 24].long().view(-1).contiguous()
bass = c[:, :, 24:].max(-1)[-1].view(-1).contiguous()
recon_root = recon_root.view(-1, 12).contiguous()
recon_chroma = recon_chroma.view(-1, 2).contiguous()
recon_bass = recon_bass.view(-1, 12).contiguous()
root_loss = loss_fun(recon_root, root)
chroma_loss = loss_fun(recon_chroma, chroma)
bass_loss = loss_fun(recon_bass, bass)
chord_loss = root_loss + chroma_loss + bass_loss
return chord_loss, root_loss, chroma_loss, bass_loss
def kl_loss(self, *dists):
# kl = kl_with_normal(dists[0])
kl_chd = kl_with_normal(dists[0])
kl_rhy = kl_with_normal(dists[1])
kl_loss = kl_chd + kl_rhy
return kl_loss, kl_chd, kl_rhy
def loss(self, x, c, pr_mat, dt_x, tfr1=0., tfr2=0., tfr3=0., beta=0.1, weights=(1, 0.5)):
#print(pr_mat.shape, dt_x.shape)
outputs = self.run(x, c, pr_mat, tfr1, tfr2, tfr3)
loss = self.loss_function(x, c, *outputs, beta, weights)
return loss
# def inference(self, c, pr_mat):
# self.eval()
# with torch.no_grad():
# dist_chd = self.chd_encoder(c)
# # pr_mat = self.confuse_prmat(pr_mat)
# dist_rhy = self.rhy_encoder(pr_mat)
# z_chd, z_rhy = get_zs_from_dists([dist_chd, dist_rhy], True)
# dec_z = torch.cat([z_chd, z_rhy], dim=-1)
# pitch_outs, dur_outs = self.decoder(dec_z, True, None,
# None, 0., 0.)
# est_x, _, _ = self.decoder.output_to_numpy(pitch_outs, dur_outs)
# return est_x
#
# def swap(self, c1, c2, pr_mat1, pr_mat2, fix_rhy, fix_chd):
# pr_mat = pr_mat1 if fix_rhy else pr_mat2
# c = c1 if fix_chd else c2
# est_x = self.inference(c, pr_mat)
# return est_x
def inference_encode(self, pr_mat, c):
self.eval()
with torch.no_grad():
dist_chd = self.chd_encoder(c)
dist_rhy = self.rhy_encoder(pr_mat)
return dist_chd, dist_rhy
    def inference_decode(self, z_chd, z_rhy):
        """Decode concatenated (chord, rhythm) latents into a numpy note
        estimate, in eval mode and without gradients."""
        self.eval()
        with torch.no_grad():
            dec_z = torch.cat([z_chd, z_rhy], dim=-1)
            pitch_outs, dur_outs = self.decoder(dec_z, True, None,
                                                None, 0., 0.)
            est_x, _, _ = self.decoder.output_to_numpy(pitch_outs, dur_outs)
        return est_x
    def inference(self, pr_mat, c, sample):
        """Full no-grad generation: encode chord `c` and texture `pr_mat`,
        concatenate the two latents and decode a numpy note estimate.

        `sample` is forwarded to get_zs_from_dists and selects sampled vs.
        deterministic latents (presumably posterior means -- verify helper).
        """
        self.eval()
        with torch.no_grad():
            dist_chd = self.chd_encoder(c)
            dist_rhy = self.rhy_encoder(pr_mat)
            z_chd, z_rhy = get_zs_from_dists([dist_chd, dist_rhy], sample)
            # The note decoder consumes the concatenated (chord, rhythm) code.
            dec_z = torch.cat([z_chd, z_rhy], dim=-1)
            pitch_outs, dur_outs = self.decoder(dec_z, True, None,
                                                None, 0., 0.)
            est_x, _, _ = self.decoder.output_to_numpy(pitch_outs, dur_outs)
        return est_x
def swap(self, pr_mat1, pr_mat2, c1, c2, fix_rhy, fix_chd):
pr_mat = pr_mat1 if fix_rhy else pr_mat2
c = c1 if fix_chd else c2
est_x = self.inference(pr_mat, c, sample=False)
return est_x
    def posterior_sample(self, pr_mat, c, scale=None, sample_chd=True,
                         sample_txt=True):
        """Reconstruct (pr_mat, c) by sampling from the posterior latents.

        scale: if not None, both posterior stds are multiplied by this
            factor before sampling (temperature-like control).
        sample_chd / sample_txt: when False, the corresponding latent is
            replaced by its posterior mean instead of a sample.
        """
        if scale is None and sample_chd and sample_txt:
            # Fast path: plain posterior sampling through the usual pipeline.
            est_x = self.inference(pr_mat, c, sample=True)
        else:
            dist_chd, dist_rhy = self.inference_encode(pr_mat, c)
            if scale is not None:
                mean_chd = dist_chd.mean
                mean_rhy = dist_rhy.mean
                # std_chd = torch.ones_like(dist_chd.mean) * scale
                # std_rhy = torch.ones_like(dist_rhy.mean) * scale
                # Rescale the posterior std by `scale`.
                std_chd = dist_chd.scale * scale
                std_rhy = dist_rhy.scale * scale
                dist_rhy = Normal(mean_rhy, std_rhy)
                dist_chd = Normal(mean_chd, std_chd)
            z_chd, z_rhy = get_zs_from_dists([dist_chd, dist_rhy], True)
            if not sample_chd:
                z_chd = dist_chd.mean
            if not sample_txt:
                z_rhy = dist_rhy.mean
            est_x = self.inference_decode(z_chd, z_rhy)
        return est_x
    def prior_sample(self, x, c, sample_chd=False, sample_rhy=False,
                     scale=1.):
        """Decode with the flagged latent factor(s) re-drawn from a zero-mean
        Normal prior with std `scale`; unflagged factors are sampled from
        their posteriors after encoding (x, c)."""
        dist_chd, dist_rhy = self.inference_encode(x, c)
        mean = torch.zeros_like(dist_rhy.mean)
        # NOTE(review): despite the name, `loc` is used as the std below.
        loc = torch.ones_like(dist_rhy.mean) * scale
        if sample_chd:
            dist_chd = Normal(mean, loc)
        if sample_rhy:
            dist_rhy = Normal(mean, loc)
        z_chd, z_rhy = get_zs_from_dists([dist_chd, dist_rhy], True)
        return self.inference_decode(z_chd, z_rhy)
def gt_sample(self, x):
out = x[:, :, 1:].numpy()
return out
    def interp(self, pr_mat1, c1, pr_mat2, c2, interp_chd=False,
               interp_rhy=False, int_count=10):
        """Interpolate between two (texture, chord) inputs in latent space.

        For each factor flagged for interpolation, `int_count` latents are
        taken along a spherical path between the two codes; an unflagged
        factor is held fixed at sample 1's code.
        Returns an array reshaped to (bs, int_count, 32, 15, -1).
        """
        dist_chd1, dist_rhy1 = self.inference_encode(pr_mat1, c1)
        dist_chd2, dist_rhy2 = self.inference_encode(pr_mat2, c2)
        # Deterministic latents (sample=False path of get_zs_from_dists).
        [z_chd1, z_rhy1, z_chd2, z_rhy2] = \
            get_zs_from_dists([dist_chd1, dist_rhy1, dist_chd2, dist_rhy2],
                              False)
        if interp_chd:
            z_chds = self.interp_z(z_chd1, z_chd2, int_count)
        else:
            z_chds = z_chd1.unsqueeze(1).repeat(1, int_count, 1)
        if interp_rhy:
            z_rhys = self.interp_z(z_rhy1, z_rhy2, int_count)
        else:
            z_rhys = z_rhy1.unsqueeze(1).repeat(1, int_count, 1)
        bs = z_chds.size(0)
        # Flatten (bs, int_count, d) -> (bs*int_count, d) for batched decoding.
        z_chds = z_chds.view(bs * int_count, -1).contiguous()
        z_rhys = z_rhys.view(bs * int_count, -1).contiguous()
        estxs = self.inference_decode(z_chds, z_rhys)
        return estxs.reshape((bs, int_count, 32, 15, -1))
def interp_z(self, z1, z2, int_count=10):
z1 = z1.numpy()
z2 = z2.numpy()
zs = torch.stack([self.interp_path(zz1, zz2, int_count)
for zz1, zz2 in zip(z1, z2)], dim=0)
return zs
    def interp_path(self, z1, z2, interpolation_count=10):
        """Spherically interpolate between two latent codes.

        Directions are slerp-interpolated between the normalized codes while
        the magnitude is interpolated log-linearly; returns a float tensor of
        shape (interpolation_count, *z1.shape) on self.device.
        """
        result_shape = z1.shape
        z1 = z1.reshape(-1)
        z2 = z2.reshape(-1)
        def slerp2(p0, p1, t):
            # Spherical linear interpolation between unit vectors p0 and p1,
            # evaluated at all fractions in vector t.
            omega = np.arccos(
                np.dot(p0 / np.linalg.norm(p0), p1 / np.linalg.norm(p1)))
            so = np.sin(omega)
            return np.sin((1.0 - t) * omega)[:, None] / so * p0[
                None] + np.sin(
                t * omega)[:, None] / so * p1[None]
        percentages = np.linspace(0.0, 1.0, interpolation_count)
        normalized_z1 = z1 / np.linalg.norm(z1)
        normalized_z2 = z2 / np.linalg.norm(z2)
        dirs = slerp2(normalized_z1, normalized_z2, percentages)
        # Interpolate the vector norm in log space to avoid shrinking toward
        # the origin mid-path.
        length = np.linspace(np.log(np.linalg.norm(z1)),
                             np.log(np.linalg.norm(z2)),
                             interpolation_count)
        out = (dirs * np.exp(length[:, None])).reshape(
            [interpolation_count] + list(result_shape))
        # out = np.array([(1 - t) * z1 + t * z2 for t in percentages])
        return torch.from_numpy(out).to(self.device).float()
@staticmethod
def init_model(device=None, chd_size=256, txt_size=256, num_channel=10):
name = 'disvae'
if device is None:
device = torch.device('cuda' if torch.cuda.is_available()
else 'cpu')
# chd_encoder = RnnEncoder(36, 1024, 256) | chd_encoder = RnnEncoder(36, 1024, chd_size) | 3 | 2023-10-23 12:36:57+00:00 | 12k |
bytedance/ColTrack | motlib/mot_models/network/dino_mot/layer/deformable_transformer/default_decoder.py | [
{
"identifier": "DeformableTransformerDecoderLayer",
"path": "models/dino/deformable_transformer.py",
"snippet": "class DeformableTransformerDecoderLayer(nn.Module):\n def __init__(self, d_model=256, d_ffn=1024,\n dropout=0.1, activation=\"relu\",\n n_levels=4, n_heads... | from turtle import forward
from models.dino.deformable_transformer import DeformableTransformerDecoderLayer, TransformerDecoder
from models.dino.utils import gen_encoder_output_proposals, MLP,_get_activation_fn, gen_sineembed_for_position
from models.dino.ops.modules import MSDeformAttn
from torch import nn, Tensor
from typing import Optional
from util.misc import inverse_sigmoid, scale_sigmoid
import torch
import math, random | 10,427 | memory = memory,
memory_key_padding_mask = memory_key_padding_mask,
memory_level_start_index = level_start_index,
memory_spatial_shapes = spatial_shapes,
memory_pos = pos,
self_attn_mask = tgt_mask,
cross_attn_mask = memory_mask,
other_input=other_input
)
if isinstance(output, (list, tuple)):
output, track_res_layer = output
else:
track_res_layer = None
# iter update
if self.bbox_embed is not None:
reference_before_sigmoid = inverse_sigmoid(reference_points)
delta_unsig = self.bbox_embed[layer_id](output)
outputs_unsig = delta_unsig + reference_before_sigmoid
new_reference_points = scale_sigmoid(outputs_unsig.sigmoid())
# select # ref points
if self.dec_layer_number is not None and layer_id != self.num_layers - 1:
# import ipdb; ipdb.set_trace()
nq_now = new_reference_points.shape[0]
select_number = self.dec_layer_number[layer_id + 1]
if nq_now != select_number:
class_unselected = self.class_embed[layer_id](output) # nq, bs, 91
topk_proposals = torch.topk(class_unselected.max(-1)[0], select_number, dim=0)[1] # new_nq, bs
new_reference_points = torch.gather(new_reference_points, 0, topk_proposals.unsqueeze(-1).repeat(1, 1, 4)) # unsigmoid
if self.rm_detach and 'dec' in self.rm_detach:
reference_points = new_reference_points
else:
reference_points = new_reference_points.detach()
if self.use_detached_boxes_dec_out:
ref_points.append(reference_points)
else:
ref_points.append(new_reference_points)
intermediate.append(self.norm(output))
if self.dec_layer_number is not None and layer_id != self.num_layers - 1:
if nq_now != select_number:
output = torch.gather(output, 0, topk_proposals.unsqueeze(-1).repeat(1, 1, self.d_model)) # unsigmoid
return output, reference_points, track_res_layer
    def forward(self, tgt, memory,
                tgt_mask: Optional[Tensor] = None,
                memory_mask: Optional[Tensor] = None,
                tgt_key_padding_mask: Optional[Tensor] = None,
                memory_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None,
                refpoints_unsigmoid: Optional[Tensor] = None, # num_queries, bs, 2
                # for memory
                level_start_index: Optional[Tensor] = None, # num_levels
                spatial_shapes: Optional[Tensor] = None, # bs, num_levels, 2
                valid_ratios: Optional[Tensor] = None,
                track_instances = None,
                track_info = None
                ):
        """
        Decoder main loop: run every decoder layer via forward_one_layer,
        iteratively refining the box reference points, and collect the
        per-layer hidden states, reference points and tracking outputs.

        NOTE(review): track_instances / track_info are accepted but unused
        inside this method.

        Input:
            - tgt: nq, bs, d_model
            - memory: hw, bs, d_model
            - pos: hw, bs, d_model
            - refpoints_unsigmoid: nq, bs, 2/4
            - valid_ratios/spatial_shapes: bs, nlevel, 2

        Returns [hidden_states, ref_points, track_res], where the first two
        are lists of per-layer tensors transposed to batch-first, and
        track_res maps layer_id -> that layer's tracking side output.
        """
        output = tgt
        # Per-layer results; forward_one_layer appends into these lists.
        intermediate = []
        reference_points = scale_sigmoid(refpoints_unsigmoid.sigmoid())
        ref_points = [reference_points]
        track_res = {}
        for layer_id, layer in enumerate(self.layers):
            # preprocess ref points
            output, reference_points, track_res_layer = self.forward_one_layer(layer_id=layer_id, layer=layer, output=output, memory=memory,reference_points=reference_points, tgt_key_padding_mask=tgt_key_padding_mask, memory_key_padding_mask=memory_key_padding_mask, level_start_index=level_start_index,spatial_shapes=spatial_shapes,valid_ratios=valid_ratios,pos=pos, tgt_mask=tgt_mask, memory_mask=memory_mask, ref_points=ref_points, intermediate=intermediate)
            track_res[layer_id] = track_res_layer
        return [
            [itm_out.transpose(0, 1) for itm_out in intermediate],
            [itm_refpoint.transpose(0, 1) for itm_refpoint in ref_points],
            track_res
        ]
class MotDeformableTransformerDecoderLayer(DeformableTransformerDecoderLayer):
def __init__(self, args, layer_id, d_model=256, d_ffn=1024, dropout=0.1, activation="relu", n_levels=4, n_heads=8, n_points=4, use_deformable_box_attn=False, box_attn_type='roi_align', key_aware_type=None, decoder_sa_type='ca', module_seq=...):
self.args = args
self.layer_id = layer_id
self.dropout_p = dropout
self.n_heads = n_heads
super(DeformableTransformerDecoderLayer, self).__init__()
self.module_seq = module_seq
assert sorted(module_seq) == ['ca', 'ffn', 'sa']
# cross attention
# self.cross_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)
if use_deformable_box_attn:
self.cross_attn = MSDeformableBoxAttention(d_model, n_levels, n_heads, n_boxes=n_points, used_func=box_attn_type)
else:
self.cross_attn = self.init_cross_attn(d_model, n_levels, n_heads, n_points)
self.dropout1 = nn.Dropout(dropout)
self.norm1 = nn.LayerNorm(d_model)
# self attention
self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)
self.dropout2 = nn.Dropout(dropout)
self.norm2 = nn.LayerNorm(d_model)
# ffn
self.linear1 = nn.Linear(d_model, d_ffn)
| # Copyright (2023) Bytedance Ltd. and/or its affiliates
class MotTransformerDecoder(TransformerDecoder):
    """Deformable-DETR-style decoder specialized for multi-object tracking.

    Runs ``num_layers`` decoder layers, refining the box reference points
    after each layer, and collects per-layer intermediate outputs,
    per-layer reference points and per-layer tracking results.
    """
    def __init__(self, args, decoder_layer, num_layers, norm=None, return_intermediate=False, d_model=256, query_dim=4, modulate_hw_attn=False, num_feature_levels=1, deformable_decoder=False, decoder_query_perturber=None, dec_layer_number=None, rm_dec_query_scale=False, dec_layer_share=False, dec_layer_dropout_prob=None, use_detached_boxes_dec_out=False):
        # super(TransformerDecoder, self) deliberately skips
        # TransformerDecoder.__init__ and calls its parent instead; all
        # decoder state is (re)built below.
        super(TransformerDecoder, self).__init__()
        self.args = args
        self.layers = decoder_layer
        self.num_layers = num_layers
        self.norm = norm
        self.return_intermediate = return_intermediate
        assert return_intermediate, "support return_intermediate only"
        self.query_dim = query_dim
        assert query_dim in [2, 4], "query_dim should be 2/4 but {}".format(query_dim)
        self.num_feature_levels = num_feature_levels
        self.use_detached_boxes_dec_out = use_detached_boxes_dec_out
        # Maps the sine positional embedding of a reference point to a
        # d_model query-position vector.
        self.ref_point_head = MLP(query_dim // 2 * d_model, d_model, d_model, 2)
        if not deformable_decoder:
            self.query_pos_sine_scale = MLP(d_model, d_model, d_model, 2)
        else:
            self.query_pos_sine_scale = None
        if rm_dec_query_scale:
            self.query_scale = None
        else:
            raise NotImplementedError
            # NOTE(review): unreachable — kept from the upstream code path
            # that supported a learned query scale.
            self.query_scale = MLP(d_model, d_model, d_model, 2)
        # bbox_embed / class_embed are attached externally after construction.
        self.bbox_embed = None
        self.class_embed = None
        self.d_model = d_model
        self.modulate_hw_attn = modulate_hw_attn
        self.deformable_decoder = deformable_decoder
        if not deformable_decoder and modulate_hw_attn:
            self.ref_anchor_head = MLP(d_model, d_model, 2, 2)
        else:
            self.ref_anchor_head = None
        self.decoder_query_perturber = decoder_query_perturber
        self.box_pred_damping = None
        # Optional per-layer query counts used to prune queries between layers.
        self.dec_layer_number = dec_layer_number
        if dec_layer_number is not None:
            assert isinstance(dec_layer_number, list)
            assert len(dec_layer_number) == num_layers
            # assert dec_layer_number[0] ==
        self.dec_layer_dropout_prob = dec_layer_dropout_prob
        if dec_layer_dropout_prob is not None:
            assert isinstance(dec_layer_dropout_prob, list)
            assert len(dec_layer_dropout_prob) == num_layers
            for i in dec_layer_dropout_prob:
                assert 0.0 <= i <= 1.0
        self.rm_detach = None
        self.init()
    def init(self):
        # Hook for subclasses to add extra state; intentionally empty here.
        pass
    def forward_one_layer(self, layer_id, layer, output, memory, reference_points, tgt_key_padding_mask, memory_key_padding_mask, level_start_index, spatial_shapes, valid_ratios, pos, tgt_mask, memory_mask, ref_points, intermediate, other_input=None):
        """Run one decoder layer and refine the reference points.

        Appends the layer's (normed) output to ``intermediate`` and the
        refined points to ``ref_points`` (both mutated in place), and
        returns ``(output, reference_points, track_res_layer)``.
        """
        # Optionally jitter the reference points during training (not on layer 0).
        if self.training and self.decoder_query_perturber is not None and layer_id != 0:
            reference_points = self.decoder_query_perturber(reference_points)
        if self.deformable_decoder:
            if reference_points.shape[-1] == 4:
                reference_points_input = reference_points[:, :, None] \
                    * torch.cat([valid_ratios, valid_ratios], -1)[None, :] # nq, bs, nlevel, 4
            else:
                assert reference_points.shape[-1] == 2
                reference_points_input = reference_points[:, :, None] * valid_ratios[None, :]
            query_sine_embed = gen_sineembed_for_position(reference_points_input[:, :, 0, :]) # nq, bs, 256*2
        else:
            query_sine_embed = gen_sineembed_for_position(reference_points) # nq, bs, 256*2
            reference_points_input = None
        # conditional query
        # import ipdb; ipdb.set_trace()
        raw_query_pos = self.ref_point_head(query_sine_embed) # nq, bs, 256
        pos_scale = self.query_scale(output) if self.query_scale is not None else 1
        query_pos = pos_scale * raw_query_pos
        if not self.deformable_decoder:
            query_sine_embed = query_sine_embed[..., :self.d_model] * self.query_pos_sine_scale(output)
        # modulated HW attentions
        if not self.deformable_decoder and self.modulate_hw_attn:
            refHW_cond = scale_sigmoid(self.ref_anchor_head(output).sigmoid()) # nq, bs, 2
            query_sine_embed[..., self.d_model // 2:] *= (refHW_cond[..., 0] / reference_points[..., 2]).unsqueeze(-1)
            query_sine_embed[..., :self.d_model // 2] *= (refHW_cond[..., 1] / reference_points[..., 3]).unsqueeze(-1)
        # main process
        # import ipdb; ipdb.set_trace()
        # Stochastic layer dropout: skip the layer entirely with the
        # configured per-layer probability.
        dropflag = False
        if self.dec_layer_dropout_prob is not None:
            prob = random.random()
            if prob < self.dec_layer_dropout_prob[layer_id]:
                dropflag = True
        if not dropflag:
            output = layer(
                tgt = output,
                tgt_query_pos = query_pos,
                tgt_query_sine_embed = query_sine_embed,
                tgt_key_padding_mask = tgt_key_padding_mask,
                tgt_reference_points = reference_points_input,
                memory = memory,
                memory_key_padding_mask = memory_key_padding_mask,
                memory_level_start_index = level_start_index,
                memory_spatial_shapes = spatial_shapes,
                memory_pos = pos,
                self_attn_mask = tgt_mask,
                cross_attn_mask = memory_mask,
                other_input=other_input
            )
        # Layers may return (output, tracking-results) pairs.
        if isinstance(output, (list, tuple)):
            output, track_res_layer = output
        else:
            track_res_layer = None
        # iter update
        if self.bbox_embed is not None:
            # Refine boxes in unsigmoid (logit) space, then squash back.
            reference_before_sigmoid = inverse_sigmoid(reference_points)
            delta_unsig = self.bbox_embed[layer_id](output)
            outputs_unsig = delta_unsig + reference_before_sigmoid
            new_reference_points = scale_sigmoid(outputs_unsig.sigmoid())
            # select # ref points
            if self.dec_layer_number is not None and layer_id != self.num_layers - 1:
                # import ipdb; ipdb.set_trace()
                nq_now = new_reference_points.shape[0]
                select_number = self.dec_layer_number[layer_id + 1]
                if nq_now != select_number:
                    # Keep only the top-scoring queries for the next layer.
                    class_unselected = self.class_embed[layer_id](output) # nq, bs, 91
                    topk_proposals = torch.topk(class_unselected.max(-1)[0], select_number, dim=0)[1] # new_nq, bs
                    new_reference_points = torch.gather(new_reference_points, 0, topk_proposals.unsqueeze(-1).repeat(1, 1, 4)) # unsigmoid
            # Detach refined points by default so box gradients do not flow
            # across layers (unless 'dec' is listed in rm_detach).
            if self.rm_detach and 'dec' in self.rm_detach:
                reference_points = new_reference_points
            else:
                reference_points = new_reference_points.detach()
            if self.use_detached_boxes_dec_out:
                ref_points.append(reference_points)
            else:
                ref_points.append(new_reference_points)
        intermediate.append(self.norm(output))
        # NOTE(review): nq_now/select_number/topk_proposals are only bound
        # inside the bbox_embed branch above; this relies on bbox_embed
        # being set whenever dec_layer_number is used — confirm.
        if self.dec_layer_number is not None and layer_id != self.num_layers - 1:
            if nq_now != select_number:
                output = torch.gather(output, 0, topk_proposals.unsqueeze(-1).repeat(1, 1, self.d_model)) # unsigmoid
        return output, reference_points, track_res_layer
    def forward(self, tgt, memory,
                tgt_mask: Optional[Tensor] = None,
                memory_mask: Optional[Tensor] = None,
                tgt_key_padding_mask: Optional[Tensor] = None,
                memory_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None,
                refpoints_unsigmoid: Optional[Tensor] = None, # num_queries, bs, 2
                # for memory
                level_start_index: Optional[Tensor] = None, # num_levels
                spatial_shapes: Optional[Tensor] = None, # bs, num_levels, 2
                valid_ratios: Optional[Tensor] = None,
                track_instances = None,
                track_info = None
                ):
        """
        Input:
            - tgt: nq, bs, d_model
            - memory: hw, bs, d_model
            - pos: hw, bs, d_model
            - refpoints_unsigmoid: nq, bs, 2/4
            - valid_ratios/spatial_shapes: bs, nlevel, 2
        Returns a 3-element list: batch-first intermediate outputs per
        layer, batch-first reference points per layer, and a dict mapping
        layer_id -> tracking results.
        """
        output = tgt
        intermediate = []
        reference_points = scale_sigmoid(refpoints_unsigmoid.sigmoid())
        ref_points = [reference_points]
        track_res = {}
        for layer_id, layer in enumerate(self.layers):
            # preprocess ref points
            output, reference_points, track_res_layer = self.forward_one_layer(layer_id=layer_id, layer=layer, output=output, memory=memory,reference_points=reference_points, tgt_key_padding_mask=tgt_key_padding_mask, memory_key_padding_mask=memory_key_padding_mask, level_start_index=level_start_index,spatial_shapes=spatial_shapes,valid_ratios=valid_ratios,pos=pos, tgt_mask=tgt_mask, memory_mask=memory_mask, ref_points=ref_points, intermediate=intermediate)
            track_res[layer_id] = track_res_layer
        return [
            [itm_out.transpose(0, 1) for itm_out in intermediate],
            [itm_refpoint.transpose(0, 1) for itm_refpoint in ref_points],
            track_res
        ]
class MotDeformableTransformerDecoderLayer(DeformableTransformerDecoderLayer):
def __init__(self, args, layer_id, d_model=256, d_ffn=1024, dropout=0.1, activation="relu", n_levels=4, n_heads=8, n_points=4, use_deformable_box_attn=False, box_attn_type='roi_align', key_aware_type=None, decoder_sa_type='ca', module_seq=...):
self.args = args
self.layer_id = layer_id
self.dropout_p = dropout
self.n_heads = n_heads
super(DeformableTransformerDecoderLayer, self).__init__()
self.module_seq = module_seq
assert sorted(module_seq) == ['ca', 'ffn', 'sa']
# cross attention
# self.cross_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)
if use_deformable_box_attn:
self.cross_attn = MSDeformableBoxAttention(d_model, n_levels, n_heads, n_boxes=n_points, used_func=box_attn_type)
else:
self.cross_attn = self.init_cross_attn(d_model, n_levels, n_heads, n_points)
self.dropout1 = nn.Dropout(dropout)
self.norm1 = nn.LayerNorm(d_model)
# self attention
self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)
self.dropout2 = nn.Dropout(dropout)
self.norm2 = nn.LayerNorm(d_model)
# ffn
self.linear1 = nn.Linear(d_model, d_ffn) | self.activation = _get_activation_fn(activation, d_model=d_ffn, batch_dim=1) | 4 | 2023-10-16 02:18:33+00:00 | 12k |
CuriseJia/FreeStyleRet | test.py | [
{
"identifier": "ShallowStyleRetrieval",
"path": "src/models/style_retrieval.py",
"snippet": "class ShallowStyleRetrieval(nn.Module):\n def __init__(self, model_args):\n super(ShallowStyleRetrieval, self).__init__()\n self.args = model_args\n self.openclip, self.pre_process_train... | import argparse
import torch
import torch.nn.functional as F
from tqdm import tqdm
from torch.utils.data import DataLoader
from src.models import ShallowStyleRetrieval, DeepStyleRetrieval, BLIP_Retrieval
from src.dataset.data import T2ITestDataset, I2ITestDataset, X2ITestDataset
from src.utils.utils import setup_seed, getR1Accuary, getR5Accuary | 7,566 |
def parse_args():
    """Build and parse the command-line options for FreeStyleRet evaluation."""
    p = argparse.ArgumentParser(description='Parse args for FreeStyleRet Training.')
    # project settings
    p.add_argument('--resume', default='', type=str, help='load checkpoints from given path')
    p.add_argument('--origin_resume', default='model_large_retrieval_coco.pth', type=str, help='load checkpoints from given path')
    p.add_argument('--gram_encoder_path', default='pretrained/vgg_normalised.pth', type=str, help='load vgg from given path')
    p.add_argument('--style_cluster_path', default='pretrained/style_cluster.npy', type=str, help='load style prompt from given npy')
    p.add_argument('--device', default='cuda:0')
    p.add_argument('--seed', default=42, type=int)
    p.add_argument('--num_workers', default=6, type=int)
    # data settings
    p.add_argument("--type", type=str, default='style2image', help='choose train text2image or style2image.')
    p.add_argument("--style", type=str, default='sketch', help='choose sketch, art or mosaic.')
    p.add_argument("--test_dataset_path", type=str, default='DSR/')
    p.add_argument("--test_json_path", type=str, default='DSR/test.json')
    p.add_argument("--batch_size", type=int, default=24)
    # model settings
    p.add_argument('--prompt', type=str, default='DeepPrompt', help='ShallowPrompt or DeepPrompt')
    p.add_argument('--gram_prompts', type=int, default=4)
    p.add_argument('--gram_prompt_dim', type=int, default=1024)
    p.add_argument('--style_prompts', type=int, default=4)
    p.add_argument('--style_prompt_dim', type=int, default=1024)
    return p.parse_args()
def _retrieval_prob(query_feature, gallery_feature):
    """Softmax probability of matching each query to every gallery item.

    Both feature batches are L2-normalized, so the matrix product is cosine
    similarity; 100.0 is the usual CLIP-style logit scale.
    """
    query_feature = F.normalize(query_feature, dim=-1)
    gallery_feature = F.normalize(gallery_feature, dim=-1)
    return torch.softmax(100.0 * query_feature @ gallery_feature.T, dim=-1)


# NOTE: this function shadows the builtin ``eval``; the name is kept for
# backward compatibility with existing callers.
def eval(args, model, dataloader):
    """Evaluate retrieval accuracy (R@1 / R@5) over ``dataloader``.

    Depending on ``args.type`` the query is a caption (``text2image``), a
    stylized image (``style2image``), or both (any other value); the gallery
    is always the original image batch.  Results are printed, not returned.
    """
    model.eval()
    r1 = []
    r5 = []
    if args.type == 'text2image':
        for data in enumerate(tqdm(dataloader)):
            # BLIP tokenizes inside the model; other backbones expect
            # pre-tokenized input already on the target device.
            if args.prompt == 'BLIP_Retrieval':
                caption = data[1][0]
            else:
                caption = model.tokenizer(data[1][0]).to(args.device, non_blocking=True)
            image = data[1][1].to(args.device, non_blocking=True)
            image_feature = model(image, dtype='image')
            text_feature = model(caption, dtype='text')
            prob = _retrieval_prob(text_feature, image_feature)
            r1.append(getR1Accuary(prob))
            r5.append(getR5Accuary(prob))
    elif args.type == 'style2image':
        for data in enumerate(tqdm(dataloader)):
            origin_image = data[1][0].to(args.device, non_blocking=True)
            retrival_image = data[1][1].to(args.device, non_blocking=True)
            original_feature = model(origin_image, dtype='image')
            retrival_feature = model(retrival_image, dtype='image')
            prob = _retrieval_prob(retrival_feature, original_feature)
            r1.append(getR1Accuary(prob))
            r5.append(getR5Accuary(prob))
    else:
        # Combined text + style queries against the original images.
        for data in enumerate(tqdm(dataloader)):
            if args.prompt == 'BLIP_Retrieval':
                caption = data[1][0]
            else:
                caption = model.tokenizer(data[1][0]).to(args.device, non_blocking=True)
            origin_image = data[1][1].to(args.device, non_blocking=True)
            retrival_image = data[1][2].to(args.device, non_blocking=True)
            text_feature = model(caption, dtype='text')
            original_feature = model(origin_image, dtype='image')
            retrival_feature = model(retrival_image, dtype='image')
            prob1 = _retrieval_prob(text_feature, original_feature)
            prob2 = _retrieval_prob(retrival_feature, original_feature)
            # Element-wise max fuses the two modalities: take the more
            # confident of the text->image and style->image scores.
            # (The original code had a dead chained assignment
            # ``prob2 = prob = ...`` here; ``prob`` was immediately
            # overwritten, so only ``prob2`` mattered.)
            prob = prob1.max(prob2)
            r1.append(getR1Accuary(prob))
            r5.append(getR5Accuary(prob))
    resr1 = sum(r1) / len(r1)
    resr5 = sum(r5) / len(r5)
    print('R@1 Acc is {}'.format(resr1))
    print('R@5 Acc is {}'.format(resr5))
if __name__ == "__main__":
args = parse_args()
|
def parse_args():
    """Build and parse the command-line options for FreeStyleRet evaluation."""
    p = argparse.ArgumentParser(description='Parse args for FreeStyleRet Training.')
    # project settings
    p.add_argument('--resume', default='', type=str, help='load checkpoints from given path')
    p.add_argument('--origin_resume', default='model_large_retrieval_coco.pth', type=str, help='load checkpoints from given path')
    p.add_argument('--gram_encoder_path', default='pretrained/vgg_normalised.pth', type=str, help='load vgg from given path')
    p.add_argument('--style_cluster_path', default='pretrained/style_cluster.npy', type=str, help='load style prompt from given npy')
    p.add_argument('--device', default='cuda:0')
    p.add_argument('--seed', default=42, type=int)
    p.add_argument('--num_workers', default=6, type=int)
    # data settings
    p.add_argument("--type", type=str, default='style2image', help='choose train text2image or style2image.')
    p.add_argument("--style", type=str, default='sketch', help='choose sketch, art or mosaic.')
    p.add_argument("--test_dataset_path", type=str, default='DSR/')
    p.add_argument("--test_json_path", type=str, default='DSR/test.json')
    p.add_argument("--batch_size", type=int, default=24)
    # model settings
    p.add_argument('--prompt', type=str, default='DeepPrompt', help='ShallowPrompt or DeepPrompt')
    p.add_argument('--gram_prompts', type=int, default=4)
    p.add_argument('--gram_prompt_dim', type=int, default=1024)
    p.add_argument('--style_prompts', type=int, default=4)
    p.add_argument('--style_prompt_dim', type=int, default=1024)
    return p.parse_args()
def _retrieval_prob(query_feature, gallery_feature):
    """Softmax probability of matching each query to every gallery item.

    Both feature batches are L2-normalized, so the matrix product is cosine
    similarity; 100.0 is the usual CLIP-style logit scale.
    """
    query_feature = F.normalize(query_feature, dim=-1)
    gallery_feature = F.normalize(gallery_feature, dim=-1)
    return torch.softmax(100.0 * query_feature @ gallery_feature.T, dim=-1)


# NOTE: this function shadows the builtin ``eval``; the name is kept for
# backward compatibility with existing callers.
def eval(args, model, dataloader):
    """Evaluate retrieval accuracy (R@1 / R@5) over ``dataloader``.

    Depending on ``args.type`` the query is a caption (``text2image``), a
    stylized image (``style2image``), or both (any other value); the gallery
    is always the original image batch.  Results are printed, not returned.
    """
    model.eval()
    r1 = []
    r5 = []
    if args.type == 'text2image':
        for data in enumerate(tqdm(dataloader)):
            # BLIP tokenizes inside the model; other backbones expect
            # pre-tokenized input already on the target device.
            if args.prompt == 'BLIP_Retrieval':
                caption = data[1][0]
            else:
                caption = model.tokenizer(data[1][0]).to(args.device, non_blocking=True)
            image = data[1][1].to(args.device, non_blocking=True)
            image_feature = model(image, dtype='image')
            text_feature = model(caption, dtype='text')
            prob = _retrieval_prob(text_feature, image_feature)
            r1.append(getR1Accuary(prob))
            r5.append(getR5Accuary(prob))
    elif args.type == 'style2image':
        for data in enumerate(tqdm(dataloader)):
            origin_image = data[1][0].to(args.device, non_blocking=True)
            retrival_image = data[1][1].to(args.device, non_blocking=True)
            original_feature = model(origin_image, dtype='image')
            retrival_feature = model(retrival_image, dtype='image')
            prob = _retrieval_prob(retrival_feature, original_feature)
            r1.append(getR1Accuary(prob))
            r5.append(getR5Accuary(prob))
    else:
        # Combined text + style queries against the original images.
        for data in enumerate(tqdm(dataloader)):
            if args.prompt == 'BLIP_Retrieval':
                caption = data[1][0]
            else:
                caption = model.tokenizer(data[1][0]).to(args.device, non_blocking=True)
            origin_image = data[1][1].to(args.device, non_blocking=True)
            retrival_image = data[1][2].to(args.device, non_blocking=True)
            text_feature = model(caption, dtype='text')
            original_feature = model(origin_image, dtype='image')
            retrival_feature = model(retrival_image, dtype='image')
            prob1 = _retrieval_prob(text_feature, original_feature)
            prob2 = _retrieval_prob(retrival_feature, original_feature)
            # Element-wise max fuses the two modalities: take the more
            # confident of the text->image and style->image scores.
            # (The original code had a dead chained assignment
            # ``prob2 = prob = ...`` here; ``prob`` was immediately
            # overwritten, so only ``prob2`` mattered.)
            prob = prob1.max(prob2)
            r1.append(getR1Accuary(prob))
            r5.append(getR5Accuary(prob))
    resr1 = sum(r1) / len(r1)
    resr5 = sum(r5) / len(r5)
    print('R@1 Acc is {}'.format(resr1))
    print('R@5 Acc is {}'.format(resr5))
if __name__ == "__main__":
args = parse_args() | setup_seed(args.seed) | 6 | 2023-10-17 09:32:57+00:00 | 12k |
liuqidong07/MOELoRA-peft | src/MLoRA/peft/tuners/adalora.py | [
{
"identifier": "PeftType",
"path": "src/MLoRA/peft/utils/config.py",
"snippet": "class PeftType(str, enum.Enum):\n PROMPT_TUNING = \"PROMPT_TUNING\"\n P_TUNING = \"P_TUNING\"\n PREFIX_TUNING = \"PREFIX_TUNING\"\n LORA = \"LORA\"\n ADALORA = \"ADALORA\"\n ADAPTION_PROMPT = \"ADAPTION_P... | import importlib
import re
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
import bitsandbytes as bnb
from dataclasses import dataclass, field
from typing import Optional
from transformers.pytorch_utils import Conv1D
from ..utils import (
TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING,
PeftType,
_freeze_adapter,
_get_submodules,
transpose,
)
from .lora import (
LoraConfig,
LoraLayer,
LoraModel,
mark_only_lora_as_trainable,
) | 8,057 | if not kwargs["fan_in_fan_out"]:
warnings.warn(
"fan_in_fan_out is set to False but the target module is `Conv1D`. "
"Setting fan_in_fan_out to True."
)
kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = True
else:
raise ValueError(
f"Target module {target} is not supported. "
f"Currently, only `torch.nn.Linear` and `Conv1D` are supported."
)
new_module = SVDLinear(adapter_name, in_features, out_features, bias=bias, **kwargs)
self._replace_module(parent, target_name, new_module, target)
if not is_target_modules_in_base_model:
raise ValueError(
f"Target modules {lora_config.target_modules} not found in the base model. "
f"Please check the target modules and try again."
)
def __getattr__(self, name: str):
"""Forward missing attributes to the wrapped module."""
try:
return super().__getattr__(name) # defer to nn.Module's logic
except AttributeError:
return getattr(self.model, name)
def forward(self, *args, **kwargs):
outputs = self.model.forward(*args, **kwargs)
# Calculate the orthogonal regularization
orth_reg_weight = self.peft_config[self.trainable_adapter_name].orth_reg_weight
assert orth_reg_weight > 0
if hasattr(outputs, "loss"):
regu_loss = 0
num_param = 0
for n, p in self.model.named_parameters():
if ("lora_A" in n or "lora_B" in n) and self.trainable_adapter_name in n:
para_cov = p @ p.T if "lora_A" in n else p.T @ p
I = torch.eye(*para_cov.size(), out=torch.empty_like(para_cov))
I.requires_grad = False
num_param += 1
regu_loss += torch.norm(para_cov - I, p="fro")
regu_loss = regu_loss / num_param
outputs.loss += orth_reg_weight * regu_loss
return outputs
    def resize_modules_by_rank_pattern(self, rank_pattern, adapter_name):
        """Shrink each SVD-LoRA module to the ranks kept in ``rank_pattern``.

        ``rank_pattern`` maps a parameter name to a boolean index (list or
        tensor) selecting which rank triplets survive; the matching module
        is re-built at the reduced rank and the surviving rows/columns are
        copied back in.
        """
        lora_config = self.peft_config[adapter_name]
        for name, rank_idx in rank_pattern.items():
            if isinstance(rank_idx, list):
                rank = sum(rank_idx)
            elif isinstance(rank_idx, torch.Tensor):
                rank_idx = rank_idx.view(-1)
                rank = rank_idx.sum().item()
            else:
                raise ValueError("Unexcepted type of rank_idx")
            # Strip the trailing ".<layer>.<adapter>" (or just ".<layer>")
            # from the parameter name to locate the owning module.
            key = ".".join(name.split(".")[0:-2]) if adapter_name in name else ".".join(name.split(".")[0:-1])
            _, target, _ = _get_submodules(self.model, key)
            # lora_E/lora_A are indexed by rank rows; lora_B by rank columns.
            lora_E_weights = target.lora_E[adapter_name][rank_idx]
            lora_A_weights = target.lora_A[adapter_name][rank_idx]
            lora_B_weights = target.lora_B[adapter_name][:, rank_idx]
            ranknum = target.ranknum[adapter_name]
            # Re-create the layer at the reduced rank, then restore weights.
            target.update_layer(
                adapter_name,
                rank,
                lora_config.lora_alpha,
                lora_config.lora_dropout,
                lora_config.init_lora_weights,
            )
            with torch.no_grad():
                if rank > 0:
                    target.lora_E[adapter_name].copy_(lora_E_weights)
                    target.lora_A[adapter_name].copy_(lora_A_weights)
                    target.lora_B[adapter_name].copy_(lora_B_weights)
                    # The scaling is exactly as the previous
                    target.ranknum[adapter_name].copy_(ranknum)
    def resize_state_dict_by_rank_pattern(self, rank_pattern, state_dict, adapter_name):
        """Prune a saved ``state_dict`` in place to match ``rank_pattern``.

        For every pattern entry, the corresponding ``lora_E``/``lora_A``
        tensors are row-indexed and ``lora_B`` is column-indexed by the kept
        ranks; tensors already at the reduced size are left untouched.
        Returns the (mutated) ``state_dict``.
        """
        for name, rank_idx in rank_pattern.items():
            rank = sum(rank_idx)
            prefix = ".".join(name.split(".")[0:-2]) if adapter_name in name else ".".join(name.split(".")[0:-1])
            for layer in ["lora_E", "lora_A", "lora_B"]:
                key = f"base_model.model.{prefix}.{layer}.{adapter_name}"
                if layer != "lora_B":
                    state_dict[key] = (
                        state_dict[key][rank_idx] if rank != state_dict[key].shape[0] else state_dict[key]
                    )
                else:
                    state_dict[key] = (
                        state_dict[key][:, rank_idx] if rank != state_dict[key].shape[1] else state_dict[key]
                    )
        return state_dict
def update_and_allocate(self, global_step):
lora_config = self.peft_config[self.trainable_adapter_name]
# Update the importance score and allocate the budget
if global_step < lora_config.total_step - lora_config.tfinal:
_, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step)
if rank_pattern:
lora_config.rank_pattern = rank_pattern
# Finalize the budget allocation
elif global_step == lora_config.total_step - lora_config.tfinal:
_, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step, force_mask=True)
# for some reason, this freezes the trainable parameters and nothing gets updates
# self.resize_modules_by_rank_pattern(rank_pattern, self.trainable_adapter_name)
lora_config.rank_pattern = rank_pattern
self.rankallocator.reset_ipt()
# Currently using inefficient way to mask the unimportant weights using the rank pattern
# due to problem mentioned above
elif global_step > lora_config.total_step - lora_config.tfinal:
self.rankallocator.mask_using_rank_pattern(self.model, lora_config.rank_pattern)
# Pass the function and do forward propagation
else:
return None
@staticmethod
def _prepare_adalora_config(peft_config, model_config):
if peft_config.target_modules is None:
|
def is_bnb_available():
    """Return True when the optional ``bitsandbytes`` package is importable."""
    spec = importlib.util.find_spec("bitsandbytes")
    return spec is not None
if is_bnb_available():
@dataclass
class AdaLoraConfig(LoraConfig):
    """
    This is the configuration class to store the configuration of a [`~peft.AdaLora`].
    Args:
        target_r (`int`): The target average rank of incremental matrix.
        init_r (`int`): The initial rank for each incremental matrix.
        tinit (`int`): The steps of initial fine-tuning warmup.
        tfinal (`int`): The step of final fine-tuning.
        deltaT (`int`): The time interval between two budget allocations.
        beta1 (`float`): The hyperparameter of EMA for sensitivity smoothing.
        beta2 (`float`): The hyperparameter of EMA for uncertainty quantification.
        orth_reg_weight (`float`): The coefficient of orthogonal regularization.
        total_step (`int`): The total training steps that should be specified before training.
        rank_pattern (`list`): The allocated rank for each weight matrix by RankAllocator.
    """
    target_r: int = field(default=8, metadata={"help": "Target Lora matrix dimension."})
    init_r: int = field(default=12, metadata={"help": "Intial Lora matrix dimension."})
    tinit: int = field(default=0, metadata={"help": "The steps of initial warmup."})
    tfinal: int = field(default=0, metadata={"help": "The steps of final warmup."})
    deltaT: int = field(default=1, metadata={"help": "Step interval of rank allocation."})
    beta1: float = field(default=0.85, metadata={"help": "Hyperparameter of EMA."})
    beta2: float = field(default=0.85, metadata={"help": "Hyperparameter of EMA."})
    orth_reg_weight: float = field(default=0.5, metadata={"help": "The orthogonal regularization coefficient."})
    total_step: Optional[int] = field(default=None, metadata={"help": "The total training steps."})
    rank_pattern: Optional[dict] = field(default=None, metadata={"help": "The saved rank pattern."})
    def __post_init__(self):
        # Tag the config so the PEFT dispatcher routes it to AdaLoraModel.
        self.peft_type = PeftType.ADALORA
class AdaLoraModel(LoraModel):
"""
Creates AdaLoRA (Adaptive LoRA) model from a pretrained transformers model. Paper:
https://openreview.net/pdf?id=lq62uWRJjiY
Args:
model ([`transformers.PreTrainedModel`]): The model to be adapted.
config ([`AdaLoraConfig`]): The configuration of the AdaLora model.
Returns:
`torch.nn.Module`: The AdaLora model.
Example::
>>> from transformers import AutoModelForSeq2SeqLM, LoraConfig >>> from peft import AdaLoraModel, AdaLoraConfig
>>> config = AdaLoraConfig(
peft_type="ADALORA", task_type="SEQ_2_SEQ_LM", r=8, lora_alpha=32, target_modules=["q", "v"],
lora_dropout=0.01,
)
>>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> model = AdaLoraModel(config, model)
**Attributes**:
- **model** ([`transformers.PreTrainedModel`]) -- The model to be adapted.
- **peft_config** ([`AdaLoraConfig`]): The configuration of the AdaLora model.
"""
    def __init__(self, model, config, adapter_name):
        """Wrap ``model`` and install the adapter named ``adapter_name``.

        Calls ``nn.Module.__init__`` directly, bypassing ``LoraModel.__init__``
        — presumably to skip LoraModel's own adapter setup in favor of the
        AdaLoRA-specific ``add_adapter`` below; TODO confirm against LoraModel.
        """
        nn.Module.__init__(self)
        self.model = model
        # Mapping of adapter name -> AdaLoraConfig.
        self.peft_config = config
        self.add_adapter(adapter_name, self.peft_config[adapter_name])
def add_adapter(self, adapter_name, config=None):
if config is not None:
model_config = self.model.config.to_dict() if hasattr(self.model.config, "to_dict") else self.model.config
config = self._prepare_adalora_config(config, model_config)
self.peft_config[adapter_name] = config
self._find_and_replace(adapter_name)
if len(self.peft_config) > 1 and self.peft_config[adapter_name].bias != "none":
raise ValueError(
"AdaLoraModel supports only 1 adapter with bias. When using multiple adapters, set bias to 'none' for all adapters."
)
traininable_mode_counter = 0
for config in self.peft_config.values():
if not config.inference_mode:
traininable_mode_counter += 1
if traininable_mode_counter > 1:
raise ValueError(
"AdaLoraModel supports only 1 trainable adapter. "
"When using multiple adapters, set inference_mode to True for all adapters except the one you want to train."
)
mark_only_lora_as_trainable(self.model, self.peft_config[adapter_name].bias)
if self.peft_config[adapter_name].inference_mode:
_freeze_adapter(self.model, adapter_name)
else:
self.trainable_adapter_name = adapter_name
self.rankallocator = RankAllocator(self.model, self.peft_config[adapter_name], self.trainable_adapter_name)
    def _find_and_replace(self, adapter_name):
        """Replace every module matching ``target_modules`` with an SVD-LoRA layer.

        Existing ``LoraLayer`` instances just gain a new adapter entry;
        plain ``nn.Linear`` / ``Conv1D`` (and 8-bit ``Linear8bitLt`` when the
        model is loaded in 8-bit) modules are swapped for ``SVDLinear`` /
        ``SVDLinear8bitLt``.  Raises if no module matched.
        """
        lora_config = self.peft_config[adapter_name]
        loaded_in_8bit = getattr(self.model, "is_loaded_in_8bit", False)
        if loaded_in_8bit and not is_bnb_available():
            raise ImportError(
                "To use Lora with 8-bit quantization, please install the `bitsandbytes` package. "
                "You can install it with `pip install bitsandbytes`."
            )
        is_target_modules_in_base_model = False
        # Construction kwargs shared by every replacement layer.
        kwargs = {
            "r": lora_config.init_r,
            "lora_alpha": lora_config.lora_alpha,
            "lora_dropout": lora_config.lora_dropout,
            "fan_in_fan_out": lora_config.fan_in_fan_out,
            "init_lora_weights": lora_config.init_lora_weights,
        }
        key_list = [key for key, _ in self.model.named_modules()]
        for key in key_list:
            # A string pattern is matched as a full regex; a list matches by suffix.
            if isinstance(lora_config.target_modules, str):
                target_module_found = re.fullmatch(lora_config.target_modules, key)
            else:
                target_module_found = any(key.endswith(target_key) for target_key in lora_config.target_modules)
            if target_module_found:
                if not is_target_modules_in_base_model:
                    is_target_modules_in_base_model = True
                parent, target, target_name = _get_submodules(self.model, key)
                bias = target.bias is not None
                if isinstance(target, LoraLayer):
                    # Module already wrapped: just register the new adapter on it.
                    target.update_layer(
                        adapter_name,
                        lora_config.init_r,
                        lora_config.lora_alpha,
                        lora_config.lora_dropout,
                        lora_config.init_lora_weights,
                    )
                else:
                    if loaded_in_8bit and isinstance(target, bnb.nn.Linear8bitLt):
                        # Carry over the 8-bit quantization state of the original layer.
                        kwargs.update(
                            {
                                "has_fp16_weights": target.state.has_fp16_weights,
                                "memory_efficient_backward": target.state.memory_efficient_backward,
                                "threshold": target.state.threshold,
                                "index": target.index,
                            }
                        )
                        new_module = SVDLinear8bitLt(
                            adapter_name, target.in_features, target.out_features, bias=bias, **kwargs
                        )
                    else:
                        if isinstance(target, torch.nn.Linear):
                            in_features, out_features = target.in_features, target.out_features
                            if kwargs["fan_in_fan_out"]:
                                warnings.warn(
                                    "fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. "
                                    "Setting fan_in_fan_out to False."
                                )
                                kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = False
                        elif isinstance(target, Conv1D):
                            # Conv1D stores its weight transposed relative to nn.Linear.
                            in_features, out_features = (
                                target.weight.ds_shape if hasattr(target.weight, "ds_shape") else target.weight.shape
                            )
                            if not kwargs["fan_in_fan_out"]:
                                warnings.warn(
                                    "fan_in_fan_out is set to False but the target module is `Conv1D`. "
                                    "Setting fan_in_fan_out to True."
                                )
                                kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = True
                        else:
                            raise ValueError(
                                f"Target module {target} is not supported. "
                                f"Currently, only `torch.nn.Linear` and `Conv1D` are supported."
                            )
                        new_module = SVDLinear(adapter_name, in_features, out_features, bias=bias, **kwargs)
                    self._replace_module(parent, target_name, new_module, target)
        if not is_target_modules_in_base_model:
            raise ValueError(
                f"Target modules {lora_config.target_modules} not found in the base model. "
                f"Please check the target modules and try again."
            )
def __getattr__(self, name: str):
"""Forward missing attributes to the wrapped module."""
try:
return super().__getattr__(name) # defer to nn.Module's logic
except AttributeError:
return getattr(self.model, name)
def forward(self, *args, **kwargs):
outputs = self.model.forward(*args, **kwargs)
# Calculate the orthogonal regularization
orth_reg_weight = self.peft_config[self.trainable_adapter_name].orth_reg_weight
assert orth_reg_weight > 0
if hasattr(outputs, "loss"):
regu_loss = 0
num_param = 0
for n, p in self.model.named_parameters():
if ("lora_A" in n or "lora_B" in n) and self.trainable_adapter_name in n:
para_cov = p @ p.T if "lora_A" in n else p.T @ p
I = torch.eye(*para_cov.size(), out=torch.empty_like(para_cov))
I.requires_grad = False
num_param += 1
regu_loss += torch.norm(para_cov - I, p="fro")
regu_loss = regu_loss / num_param
outputs.loss += orth_reg_weight * regu_loss
return outputs
    def resize_modules_by_rank_pattern(self, rank_pattern, adapter_name):
        """Shrink each SVD-LoRA module to the ranks kept in ``rank_pattern``.

        ``rank_pattern`` maps a parameter name to a boolean index (list or
        tensor) selecting which rank triplets survive; the matching module
        is re-built at the reduced rank and the surviving rows/columns are
        copied back in.
        """
        lora_config = self.peft_config[adapter_name]
        for name, rank_idx in rank_pattern.items():
            if isinstance(rank_idx, list):
                rank = sum(rank_idx)
            elif isinstance(rank_idx, torch.Tensor):
                rank_idx = rank_idx.view(-1)
                rank = rank_idx.sum().item()
            else:
                raise ValueError("Unexcepted type of rank_idx")
            # Strip the trailing ".<layer>.<adapter>" (or just ".<layer>")
            # from the parameter name to locate the owning module.
            key = ".".join(name.split(".")[0:-2]) if adapter_name in name else ".".join(name.split(".")[0:-1])
            _, target, _ = _get_submodules(self.model, key)
            # lora_E/lora_A are indexed by rank rows; lora_B by rank columns.
            lora_E_weights = target.lora_E[adapter_name][rank_idx]
            lora_A_weights = target.lora_A[adapter_name][rank_idx]
            lora_B_weights = target.lora_B[adapter_name][:, rank_idx]
            ranknum = target.ranknum[adapter_name]
            # Re-create the layer at the reduced rank, then restore weights.
            target.update_layer(
                adapter_name,
                rank,
                lora_config.lora_alpha,
                lora_config.lora_dropout,
                lora_config.init_lora_weights,
            )
            with torch.no_grad():
                if rank > 0:
                    target.lora_E[adapter_name].copy_(lora_E_weights)
                    target.lora_A[adapter_name].copy_(lora_A_weights)
                    target.lora_B[adapter_name].copy_(lora_B_weights)
                    # The scaling is exactly as the previous
                    target.ranknum[adapter_name].copy_(ranknum)
    def resize_state_dict_by_rank_pattern(self, rank_pattern, state_dict, adapter_name):
        """Prune a saved ``state_dict`` in place to match ``rank_pattern``.

        For every pattern entry, the corresponding ``lora_E``/``lora_A``
        tensors are row-indexed and ``lora_B`` is column-indexed by the kept
        ranks; tensors already at the reduced size are left untouched.
        Returns the (mutated) ``state_dict``.
        """
        for name, rank_idx in rank_pattern.items():
            rank = sum(rank_idx)
            prefix = ".".join(name.split(".")[0:-2]) if adapter_name in name else ".".join(name.split(".")[0:-1])
            for layer in ["lora_E", "lora_A", "lora_B"]:
                key = f"base_model.model.{prefix}.{layer}.{adapter_name}"
                if layer != "lora_B":
                    state_dict[key] = (
                        state_dict[key][rank_idx] if rank != state_dict[key].shape[0] else state_dict[key]
                    )
                else:
                    state_dict[key] = (
                        state_dict[key][:, rank_idx] if rank != state_dict[key].shape[1] else state_dict[key]
                    )
        return state_dict
def update_and_allocate(self, global_step):
lora_config = self.peft_config[self.trainable_adapter_name]
# Update the importance score and allocate the budget
if global_step < lora_config.total_step - lora_config.tfinal:
_, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step)
if rank_pattern:
lora_config.rank_pattern = rank_pattern
# Finalize the budget allocation
elif global_step == lora_config.total_step - lora_config.tfinal:
_, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step, force_mask=True)
# for some reason, this freezes the trainable parameters and nothing gets updates
# self.resize_modules_by_rank_pattern(rank_pattern, self.trainable_adapter_name)
lora_config.rank_pattern = rank_pattern
self.rankallocator.reset_ipt()
# Currently using inefficient way to mask the unimportant weights using the rank pattern
# due to problem mentioned above
elif global_step > lora_config.total_step - lora_config.tfinal:
self.rankallocator.mask_using_rank_pattern(self.model, lora_config.rank_pattern)
# Pass the function and do forward propagation
else:
return None
@staticmethod
def _prepare_adalora_config(peft_config, model_config):
if peft_config.target_modules is None: | if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING: | 1 | 2023-10-19 10:55:50+00:00 | 12k |
YuroFR/freqtrade-modded-crypto-trading-bot | tests/data/test_btanalysis.py | [
{
"identifier": "TimeRange",
"path": "freqtrade/configuration/timerange.py",
"snippet": "class TimeRange:\n \"\"\"\n object defining timerange inputs.\n [start/stop]type defines if [start/stop]ts shall be used.\n if *type is None, don't use corresponding startvalue.\n \"\"\"\n\n def __... | from datetime import datetime, timedelta, timezone
from pathlib import Path
from unittest.mock import MagicMock
from pandas import DataFrame, DateOffset, Timestamp, to_datetime
from freqtrade.configuration import TimeRange
from freqtrade.constants import LAST_BT_RESULT_FN
from freqtrade.data.btanalysis import (BT_DATA_COLUMNS, analyze_trade_parallelism,
extract_trades_of_period, get_latest_backtest_filename,
get_latest_hyperopt_file, load_backtest_data,
load_backtest_metadata, load_trades, load_trades_from_db)
from freqtrade.data.history import load_data, load_pair_history
from freqtrade.data.metrics import (calculate_cagr, calculate_calmar, calculate_csum,
calculate_expectancy, calculate_market_change,
calculate_max_drawdown, calculate_sharpe, calculate_sortino,
calculate_underwater, combine_dataframes_with_mean,
create_cum_profit)
from freqtrade.exceptions import OperationalException
from freqtrade.util import dt_utc
from tests.conftest import CURRENT_TEST_STRATEGY, create_mock_trades
from tests.conftest_trades import MOCK_TRADE_COUNT
import pytest | 10,229 | load_backtest_data(filename)
def test_load_backtest_data_new_format(testdatadir):
filename = testdatadir / "backtest_results/backtest-result.json"
bt_data = load_backtest_data(filename)
assert isinstance(bt_data, DataFrame)
assert set(bt_data.columns) == set(BT_DATA_COLUMNS)
assert len(bt_data) == 179
# Test loading from string (must yield same result)
bt_data2 = load_backtest_data(str(filename))
assert bt_data.equals(bt_data2)
# Test loading from folder (must yield same result)
bt_data3 = load_backtest_data(testdatadir / "backtest_results")
assert bt_data.equals(bt_data3)
with pytest.raises(ValueError, match=r"File .* does not exist\."):
load_backtest_data("filename" + "nofile")
with pytest.raises(ValueError, match=r"Unknown dataformat."):
load_backtest_data(testdatadir / "backtest_results" / LAST_BT_RESULT_FN)
def test_load_backtest_data_multi(testdatadir):
filename = testdatadir / "backtest_results/backtest-result_multistrat.json"
for strategy in ('StrategyTestV2', 'TestStrategy'):
bt_data = load_backtest_data(filename, strategy=strategy)
assert isinstance(bt_data, DataFrame)
assert set(bt_data.columns) == set(
BT_DATA_COLUMNS)
assert len(bt_data) == 179
# Test loading from string (must yield same result)
bt_data2 = load_backtest_data(str(filename), strategy=strategy)
assert bt_data.equals(bt_data2)
with pytest.raises(ValueError, match=r"Strategy XYZ not available in the backtest result\."):
load_backtest_data(filename, strategy='XYZ')
with pytest.raises(ValueError, match=r"Detected backtest result with more than one strategy.*"):
load_backtest_data(filename)
@pytest.mark.usefixtures("init_persistence")
@pytest.mark.parametrize('is_short', [False, True])
def test_load_trades_from_db(default_conf, fee, is_short, mocker):
create_mock_trades(fee, is_short)
# remove init so it does not init again
init_mock = mocker.patch('freqtrade.data.btanalysis.init_db', MagicMock())
trades = load_trades_from_db(db_url=default_conf['db_url'])
assert init_mock.call_count == 1
assert len(trades) == MOCK_TRADE_COUNT
assert isinstance(trades, DataFrame)
assert "pair" in trades.columns
assert "open_date" in trades.columns
assert "profit_ratio" in trades.columns
for col in BT_DATA_COLUMNS:
if col not in ['index', 'open_at_end']:
assert col in trades.columns
trades = load_trades_from_db(db_url=default_conf['db_url'], strategy=CURRENT_TEST_STRATEGY)
assert len(trades) == 4
trades = load_trades_from_db(db_url=default_conf['db_url'], strategy='NoneStrategy')
assert len(trades) == 0
def test_extract_trades_of_period(testdatadir):
pair = "UNITTEST/BTC"
# 2018-11-14 06:07:00
timerange = TimeRange('date', None, 1510639620, 0)
data = load_pair_history(pair=pair, timeframe='1m',
datadir=testdatadir, timerange=timerange)
trades = DataFrame(
{'pair': [pair, pair, pair, pair],
'profit_ratio': [0.0, 0.1, -0.2, -0.5],
'profit_abs': [0.0, 1, -2, -5],
'open_date': to_datetime([datetime(2017, 11, 13, 15, 40, 0, tzinfo=timezone.utc),
datetime(2017, 11, 14, 9, 41, 0, tzinfo=timezone.utc),
datetime(2017, 11, 14, 14, 20, 0, tzinfo=timezone.utc),
datetime(2017, 11, 15, 3, 40, 0, tzinfo=timezone.utc),
], utc=True
),
'close_date': to_datetime([datetime(2017, 11, 13, 16, 40, 0, tzinfo=timezone.utc),
datetime(2017, 11, 14, 10, 41, 0, tzinfo=timezone.utc),
datetime(2017, 11, 14, 15, 25, 0, tzinfo=timezone.utc),
datetime(2017, 11, 15, 3, 55, 0, tzinfo=timezone.utc),
], utc=True)
})
trades1 = extract_trades_of_period(data, trades)
# First and last trade are dropped as they are out of range
assert len(trades1) == 2
assert trades1.iloc[0].open_date == datetime(2017, 11, 14, 9, 41, 0, tzinfo=timezone.utc)
assert trades1.iloc[0].close_date == datetime(2017, 11, 14, 10, 41, 0, tzinfo=timezone.utc)
assert trades1.iloc[-1].open_date == datetime(2017, 11, 14, 14, 20, 0, tzinfo=timezone.utc)
assert trades1.iloc[-1].close_date == datetime(2017, 11, 14, 15, 25, 0, tzinfo=timezone.utc)
def test_analyze_trade_parallelism(testdatadir):
filename = testdatadir / "backtest_results/backtest-result.json"
bt_data = load_backtest_data(filename)
res = analyze_trade_parallelism(bt_data, "5m")
assert isinstance(res, DataFrame)
assert 'open_trades' in res.columns
assert res['open_trades'].max() == 3
assert res['open_trades'].min() == 0
def test_load_trades(default_conf, mocker):
db_mock = mocker.patch("freqtrade.data.btanalysis.load_trades_from_db", MagicMock())
bt_mock = mocker.patch("freqtrade.data.btanalysis.load_backtest_data", MagicMock())
|
def test_get_latest_backtest_filename(testdatadir, mocker):
with pytest.raises(ValueError, match=r"Directory .* does not exist\."):
get_latest_backtest_filename(testdatadir / 'does_not_exist')
with pytest.raises(ValueError,
match=r"Directory .* does not seem to contain .*"):
get_latest_backtest_filename(testdatadir)
testdir_bt = testdatadir / "backtest_results"
res = get_latest_backtest_filename(testdir_bt)
assert res == 'backtest-result.json'
res = get_latest_backtest_filename(str(testdir_bt))
assert res == 'backtest-result.json'
mocker.patch("freqtrade.data.btanalysis.json_load", return_value={})
with pytest.raises(ValueError, match=r"Invalid '.last_result.json' format."):
get_latest_backtest_filename(testdir_bt)
def test_get_latest_hyperopt_file(testdatadir):
res = get_latest_hyperopt_file(testdatadir / 'does_not_exist', 'testfile.pickle')
assert res == testdatadir / 'does_not_exist/testfile.pickle'
res = get_latest_hyperopt_file(testdatadir.parent)
assert res == testdatadir.parent / "hyperopt_results.pickle"
res = get_latest_hyperopt_file(str(testdatadir.parent))
assert res == testdatadir.parent / "hyperopt_results.pickle"
# Test with absolute path
with pytest.raises(
OperationalException,
match="--hyperopt-filename expects only the filename, not an absolute path."):
get_latest_hyperopt_file(str(testdatadir.parent), str(testdatadir.parent))
def test_load_backtest_metadata(mocker, testdatadir):
res = load_backtest_metadata(testdatadir / 'nonexistant.file.json')
assert res == {}
mocker.patch('freqtrade.data.btanalysis.get_backtest_metadata_filename')
mocker.patch('freqtrade.data.btanalysis.json_load', side_effect=Exception())
with pytest.raises(OperationalException,
match=r"Unexpected error.*loading backtest metadata\."):
load_backtest_metadata(testdatadir / 'nonexistant.file.json')
def test_load_backtest_data_old_format(testdatadir, mocker):
filename = testdatadir / "backtest-result_test222.json"
mocker.patch('freqtrade.data.btanalysis.load_backtest_stats', return_value=[])
with pytest.raises(OperationalException,
match=r"Backtest-results with only trades data are no longer supported."):
load_backtest_data(filename)
def test_load_backtest_data_new_format(testdatadir):
filename = testdatadir / "backtest_results/backtest-result.json"
bt_data = load_backtest_data(filename)
assert isinstance(bt_data, DataFrame)
assert set(bt_data.columns) == set(BT_DATA_COLUMNS)
assert len(bt_data) == 179
# Test loading from string (must yield same result)
bt_data2 = load_backtest_data(str(filename))
assert bt_data.equals(bt_data2)
# Test loading from folder (must yield same result)
bt_data3 = load_backtest_data(testdatadir / "backtest_results")
assert bt_data.equals(bt_data3)
with pytest.raises(ValueError, match=r"File .* does not exist\."):
load_backtest_data("filename" + "nofile")
with pytest.raises(ValueError, match=r"Unknown dataformat."):
load_backtest_data(testdatadir / "backtest_results" / LAST_BT_RESULT_FN)
def test_load_backtest_data_multi(testdatadir):
filename = testdatadir / "backtest_results/backtest-result_multistrat.json"
for strategy in ('StrategyTestV2', 'TestStrategy'):
bt_data = load_backtest_data(filename, strategy=strategy)
assert isinstance(bt_data, DataFrame)
assert set(bt_data.columns) == set(
BT_DATA_COLUMNS)
assert len(bt_data) == 179
# Test loading from string (must yield same result)
bt_data2 = load_backtest_data(str(filename), strategy=strategy)
assert bt_data.equals(bt_data2)
with pytest.raises(ValueError, match=r"Strategy XYZ not available in the backtest result\."):
load_backtest_data(filename, strategy='XYZ')
with pytest.raises(ValueError, match=r"Detected backtest result with more than one strategy.*"):
load_backtest_data(filename)
@pytest.mark.usefixtures("init_persistence")
@pytest.mark.parametrize('is_short', [False, True])
def test_load_trades_from_db(default_conf, fee, is_short, mocker):
create_mock_trades(fee, is_short)
# remove init so it does not init again
init_mock = mocker.patch('freqtrade.data.btanalysis.init_db', MagicMock())
trades = load_trades_from_db(db_url=default_conf['db_url'])
assert init_mock.call_count == 1
assert len(trades) == MOCK_TRADE_COUNT
assert isinstance(trades, DataFrame)
assert "pair" in trades.columns
assert "open_date" in trades.columns
assert "profit_ratio" in trades.columns
for col in BT_DATA_COLUMNS:
if col not in ['index', 'open_at_end']:
assert col in trades.columns
trades = load_trades_from_db(db_url=default_conf['db_url'], strategy=CURRENT_TEST_STRATEGY)
assert len(trades) == 4
trades = load_trades_from_db(db_url=default_conf['db_url'], strategy='NoneStrategy')
assert len(trades) == 0
def test_extract_trades_of_period(testdatadir):
pair = "UNITTEST/BTC"
# 2018-11-14 06:07:00
timerange = TimeRange('date', None, 1510639620, 0)
data = load_pair_history(pair=pair, timeframe='1m',
datadir=testdatadir, timerange=timerange)
trades = DataFrame(
{'pair': [pair, pair, pair, pair],
'profit_ratio': [0.0, 0.1, -0.2, -0.5],
'profit_abs': [0.0, 1, -2, -5],
'open_date': to_datetime([datetime(2017, 11, 13, 15, 40, 0, tzinfo=timezone.utc),
datetime(2017, 11, 14, 9, 41, 0, tzinfo=timezone.utc),
datetime(2017, 11, 14, 14, 20, 0, tzinfo=timezone.utc),
datetime(2017, 11, 15, 3, 40, 0, tzinfo=timezone.utc),
], utc=True
),
'close_date': to_datetime([datetime(2017, 11, 13, 16, 40, 0, tzinfo=timezone.utc),
datetime(2017, 11, 14, 10, 41, 0, tzinfo=timezone.utc),
datetime(2017, 11, 14, 15, 25, 0, tzinfo=timezone.utc),
datetime(2017, 11, 15, 3, 55, 0, tzinfo=timezone.utc),
], utc=True)
})
trades1 = extract_trades_of_period(data, trades)
# First and last trade are dropped as they are out of range
assert len(trades1) == 2
assert trades1.iloc[0].open_date == datetime(2017, 11, 14, 9, 41, 0, tzinfo=timezone.utc)
assert trades1.iloc[0].close_date == datetime(2017, 11, 14, 10, 41, 0, tzinfo=timezone.utc)
assert trades1.iloc[-1].open_date == datetime(2017, 11, 14, 14, 20, 0, tzinfo=timezone.utc)
assert trades1.iloc[-1].close_date == datetime(2017, 11, 14, 15, 25, 0, tzinfo=timezone.utc)
def test_analyze_trade_parallelism(testdatadir):
filename = testdatadir / "backtest_results/backtest-result.json"
bt_data = load_backtest_data(filename)
res = analyze_trade_parallelism(bt_data, "5m")
assert isinstance(res, DataFrame)
assert 'open_trades' in res.columns
assert res['open_trades'].max() == 3
assert res['open_trades'].min() == 0
def test_load_trades(default_conf, mocker):
db_mock = mocker.patch("freqtrade.data.btanalysis.load_trades_from_db", MagicMock())
bt_mock = mocker.patch("freqtrade.data.btanalysis.load_backtest_data", MagicMock())
| load_trades("DB", | 9 | 2023-10-21 10:02:05+00:00 | 12k |
yanzhh/HGERE | transformers/src/transformers/modeling_utils.py | [
{
"identifier": "get_activation",
"path": "transformers/src/transformers/activations.py",
"snippet": "def get_activation(activation_string):\n if activation_string in ACT2FN:\n return ACT2FN[activation_string]\n else:\n raise KeyError(\n \"function {} not found in ACT2FN m... | import logging
import os
import typing
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.nn import functional as F
from .activations import get_activation
from .configuration_utils import PretrainedConfig
from .file_utils import (
DUMMY_INPUTS,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
WEIGHTS_NAME,
cached_path,
hf_bucket_url,
is_remote_url,
)
from torch.nn import Identity
from transformers import load_tf2_checkpoint_in_pytorch_model | 8,543 | It is up to you to train those weights with a downstream fine-tuning task.
The warning ``Weights from XXX not used in YYY`` means that the layer XXX is not used by YYY, therefore those weights are discarded.
Parameters:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
- None if you are both providing the configuration and state dictionary (resp. with keyword arguments ``config`` and ``state_dict``)
model_args: (`optional`) Sequence of positional arguments:
All remaning positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) one of:
- an instance of a class derived from :class:`~transformers.PretrainedConfig`, or
- a string valid as input to :func:`~transformers.PretrainedConfig.from_pretrained()`
Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
- the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory.
- the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
Force to (re-)download the model weights and configuration files and override the cached versions if they exists.
resume_download: (`optional`) boolean, default False:
Do not delete incompletely recieved file. Attempt to resume the download if such a file exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments:
Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.
Examples::
# For example purposes. Not runnable.
model = BertModel.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = BertModel.from_pretrained('./test/saved_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
model = BertModel.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json')
model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
state_dict = kwargs.pop("state_dict", None)
cache_dir = kwargs.pop("cache_dir", None)
from_tf = kwargs.pop("from_tf", False)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
output_loading_info = kwargs.pop("output_loading_info", False)
local_files_only = kwargs.pop("local_files_only", False)
# Load config if we don't provide a configuration
if not isinstance(config, PretrainedConfig):
config_path = config if config is not None else pretrained_model_name_or_path
config, model_kwargs = cls.config_class.from_pretrained(
config_path,
*model_args,
cache_dir=cache_dir,
return_unused_kwargs=True,
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
local_files_only=local_files_only,
**kwargs,
)
else:
model_kwargs = kwargs
# Load model
if pretrained_model_name_or_path is not None:
if pretrained_model_name_or_path in cls.pretrained_model_archive_map:
archive_file = cls.pretrained_model_archive_map[pretrained_model_name_or_path]
elif os.path.isdir(pretrained_model_name_or_path):
if from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")):
# Load from a TF 1.0 checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")
elif from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):
# Load from a TF 2.0 checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
# Load from a PyTorch checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
else:
raise EnvironmentError(
"Error no file named {} found in directory {} or `from_tf` set to False".format(
[WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME + ".index"], pretrained_model_name_or_path
)
)
elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
archive_file = pretrained_model_name_or_path
elif os.path.isfile(pretrained_model_name_or_path + ".index"):
assert (
from_tf
), "We found a TensorFlow checkpoint at {}, please set from_tf to True to load from this checkpoint".format(
pretrained_model_name_or_path + ".index"
)
archive_file = pretrained_model_name_or_path + ".index"
else:
| # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
logger = logging.getLogger(__name__)
try:
except ImportError:
# Older PyTorch compatibility
class Identity(nn.Module):
r"""A placeholder identity operator that is argument-insensitive.
"""
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, input):
return input
class ModuleUtilsMixin:
"""
A few utilities for torch.nn.Modules, to be used as a mixin.
"""
def num_parameters(self, only_trainable: bool = False) -> int:
"""
Get number of (optionally, trainable) parameters in the module.
"""
params = filter(lambda x: x.requires_grad, self.parameters()) if only_trainable else self.parameters()
return sum(p.numel() for p in params)
class PreTrainedModel(nn.Module, ModuleUtilsMixin):
r""" Base class for all models.
:class:`~transformers.PreTrainedModel` takes care of storing the configuration of the models and handles methods for loading/downloading/saving models
as well as a few methods common to all models to (i) resize the input embeddings and (ii) prune heads in the self-attention heads.
Class attributes (overridden by derived classes):
- ``config_class``: a class derived from :class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture.
- ``pretrained_model_archive_map``: a python ``dict`` of with `short-cut-names` (string) as keys and `url` (string) of associated pretrained weights as values.
- ``load_tf_weights``: a python ``method`` for loading a TensorFlow checkpoint in a PyTorch model, taking as arguments:
- ``model``: an instance of the relevant subclass of :class:`~transformers.PreTrainedModel`,
- ``config``: an instance of the relevant subclass of :class:`~transformers.PretrainedConfig`,
- ``path``: a path (string) to the TensorFlow checkpoint.
- ``base_model_prefix``: a string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model.
"""
config_class = None
pretrained_model_archive_map = {}
base_model_prefix = ""
@property
def dummy_inputs(self):
""" Dummy inputs to do a forward pass in the network.
Returns:
torch.Tensor with dummy inputs
"""
return {"input_ids": torch.tensor(DUMMY_INPUTS)}
def __init__(self, config, *inputs, **kwargs):
super().__init__()
if not isinstance(config, PretrainedConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. "
"To create a model from a pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
)
)
# Save config in model
self.config = config
@property
def base_model(self):
return getattr(self, self.base_model_prefix, self)
def get_input_embeddings(self):
"""
Returns the model's input embeddings.
Returns:
:obj:`nn.Module`:
A torch module mapping vocabulary to hidden states.
"""
base_model = getattr(self, self.base_model_prefix, self)
if base_model is not self:
return base_model.get_input_embeddings()
else:
raise NotImplementedError
def set_input_embeddings(self, value):
"""
Set model's input embeddings
Args:
value (:obj:`nn.Module`):
A module mapping vocabulary to hidden states.
"""
base_model = getattr(self, self.base_model_prefix, self)
if base_model is not self:
base_model.set_input_embeddings(value)
else:
raise NotImplementedError
def get_output_embeddings(self):
"""
Returns the model's output embeddings.
Returns:
:obj:`nn.Module`:
A torch module mapping hidden states to vocabulary.
"""
return None # Overwrite for models with output embeddings
def tie_weights(self):
"""
Tie the weights between the input embeddings and the output embeddings.
If the `torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning
the weights instead.
"""
output_embeddings = self.get_output_embeddings()
if output_embeddings is not None:
if isinstance(output_embeddings, list):
for x in output_embeddings:
self._tie_or_clone_weights(x, self.get_input_embeddings())
else:
self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())
def _tie_or_clone_weights(self, output_embeddings, input_embeddings):
""" Tie or clone module weights depending of weither we are using TorchScript or not
"""
if self.config.torchscript:
output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone())
else:
output_embeddings.weight = input_embeddings.weight
if hasattr(output_embeddings, "bias") and output_embeddings.bias is not None:
output_embeddings.bias.data = torch.nn.functional.pad(
output_embeddings.bias.data,
(0, output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0]),
"constant",
0,
)
if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"):
output_embeddings.out_features = input_embeddings.num_embeddings
def resize_token_embeddings(self, new_num_tokens=None):
""" Resize input token embeddings matrix of the model if new_num_tokens != config.vocab_size.
Take care of tying weights embeddings afterwards if the model class has a `tie_weights()` method.
Arguments:
new_num_tokens: (`optional`) int:
New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end.
If not provided or None: does nothing and just returns a pointer to the input tokens ``torch.nn.Embeddings`` Module of the model.
Return: ``torch.nn.Embeddings``
Pointer to the input tokens Embeddings Module of the model
"""
base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed
model_embeds = base_model._resize_token_embeddings(new_num_tokens)
if new_num_tokens is None:
return model_embeds
# Update base model and current model config
self.config.vocab_size = new_num_tokens
base_model.vocab_size = new_num_tokens
# Tie weights again if needed
self.tie_weights()
return model_embeds
def _resize_token_embeddings(self, new_num_tokens):
old_embeddings = self.get_input_embeddings()
new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
self.set_input_embeddings(new_embeddings)
return self.get_input_embeddings()
def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None):
""" Build a resized Embedding Module from a provided token Embedding Module.
Increasing the size will add newly initialized vectors at the end
Reducing the size will remove vectors from the end
Args:
new_num_tokens: (`optional`) int
New number of tokens in the embedding matrix.
Increasing the size will add newly initialized vectors at the end
Reducing the size will remove vectors from the end
If not provided or None: return the provided token Embedding Module.
Return: ``torch.nn.Embeddings``
Pointer to the resized Embedding Module or the old Embedding Module if new_num_tokens is None
"""
if new_num_tokens is None:
return old_embeddings
old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
if old_num_tokens == new_num_tokens:
return old_embeddings
# Build new embeddings
new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim)
new_embeddings.to(old_embeddings.weight.device)
# initialize all new embeddings (in particular added tokens)
self._init_weights(new_embeddings)
# Copy word embeddings from the previous weights
num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
new_embeddings.weight.data[:num_tokens_to_copy, :] = old_embeddings.weight.data[:num_tokens_to_copy, :]
return new_embeddings
def init_weights(self):
""" Initialize and prunes weights if needed. """
# Initialize weights
self.apply(self._init_weights)
# Prune heads if needed
if self.config.pruned_heads:
self.prune_heads(self.config.pruned_heads)
# Tie weights if needed
self.tie_weights()
def prune_heads(self, heads_to_prune):
""" Prunes heads of the base model.
Arguments:
heads_to_prune: dict with keys being selected layer indices (`int`) and associated values being the list of heads to prune in said layer (list of `int`).
E.g. {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.
"""
# save new sets of pruned heads as union of previously stored pruned heads and newly pruned heads
for layer, heads in heads_to_prune.items():
union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads)
self.config.pruned_heads[layer] = list(union_heads) # Unfortunately we have to store it as list for JSON
self.base_model._prune_heads(heads_to_prune)
def save_pretrained(self, save_directory):
""" Save a model and its configuration file to a directory, so that it
can be re-loaded using the `:func:`~transformers.PreTrainedModel.from_pretrained`` class method.
"""
assert os.path.isdir(
save_directory
), "Saving path should be a directory where the model and configuration can be saved"
# Only save the model itself if we are using distributed training
model_to_save = self.module if hasattr(self, "module") else self
# Attach architecture to the config
model_to_save.config.architectures = [model_to_save.__class__.__name__]
# Save configuration file
model_to_save.config.save_pretrained(save_directory)
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(save_directory, WEIGHTS_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
logger.info("Model weights saved in {}".format(output_model_file))
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""Instantiate a pretrained pytorch model from a pre-trained model configuration.
The model is set in evaluation mode by default using ``model.eval()`` (Dropout modules are deactivated)
To train the model, you should first set it back in training mode with ``model.train()``
The warning ``Weights from XXX not initialized from pretrained model`` means that the weights of XXX do not come pre-trained with the rest of the model.
It is up to you to train those weights with a downstream fine-tuning task.
The warning ``Weights from XXX not used in YYY`` means that the layer XXX is not used by YYY, therefore those weights are discarded.
Parameters:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
- a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
- a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
- a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
- None if you are both providing the configuration and state dictionary (resp. with keyword arguments ``config`` and ``state_dict``)
model_args: (`optional`) Sequence of positional arguments:
All remaning positional arguments will be passed to the underlying model's ``__init__`` method
config: (`optional`) one of:
- an instance of a class derived from :class:`~transformers.PretrainedConfig`, or
- a string valid as input to :func:`~transformers.PretrainedConfig.from_pretrained()`
Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when:
- the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
- the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory.
- the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.
state_dict: (`optional`) dict:
an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own weights.
In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded pre-trained model
configuration should be cached if the standard cache should not be used.
force_download: (`optional`) boolean, default False:
Force to (re-)download the model weights and configuration files and override the cached versions if they exists.
resume_download: (`optional`) boolean, default False:
Do not delete incompletely recieved file. Attempt to resume the download if such a file exists.
proxies: (`optional`) dict, default None:
A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
output_loading_info: (`optional`) boolean:
Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages.
kwargs: (`optional`) Remaining dictionary of keyword arguments:
Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.
Examples::
# For example purposes. Not runnable.
model = BertModel.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.
model = BertModel.from_pretrained('./test/saved_model/') # E.g. model was saved using `save_pretrained('./test/saved_model/')`
model = BertModel.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading
assert model.config.output_attention == True
# Loading from a TF checkpoint file instead of a PyTorch model (slower)
config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json')
model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
state_dict = kwargs.pop("state_dict", None)
cache_dir = kwargs.pop("cache_dir", None)
from_tf = kwargs.pop("from_tf", False)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
output_loading_info = kwargs.pop("output_loading_info", False)
local_files_only = kwargs.pop("local_files_only", False)
# Load config if we don't provide a configuration
if not isinstance(config, PretrainedConfig):
config_path = config if config is not None else pretrained_model_name_or_path
config, model_kwargs = cls.config_class.from_pretrained(
config_path,
*model_args,
cache_dir=cache_dir,
return_unused_kwargs=True,
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
local_files_only=local_files_only,
**kwargs,
)
else:
model_kwargs = kwargs
# Load model
if pretrained_model_name_or_path is not None:
if pretrained_model_name_or_path in cls.pretrained_model_archive_map:
archive_file = cls.pretrained_model_archive_map[pretrained_model_name_or_path]
elif os.path.isdir(pretrained_model_name_or_path):
if from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")):
# Load from a TF 1.0 checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")
elif from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):
# Load from a TF 2.0 checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
# Load from a PyTorch checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
else:
raise EnvironmentError(
"Error no file named {} found in directory {} or `from_tf` set to False".format(
[WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME + ".index"], pretrained_model_name_or_path
)
)
elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
archive_file = pretrained_model_name_or_path
elif os.path.isfile(pretrained_model_name_or_path + ".index"):
assert (
from_tf
), "We found a TensorFlow checkpoint at {}, please set from_tf to True to load from this checkpoint".format(
pretrained_model_name_or_path + ".index"
)
archive_file = pretrained_model_name_or_path + ".index"
else: | archive_file = hf_bucket_url( | 7 | 2023-10-15 02:31:09+00:00 | 12k |
generative-skill-chaining/gsc-code | generative_skill_chaining/envs/pybullet/table/predicates.py | [
{
"identifier": "primitive_actions",
"path": "generative_skill_chaining/envs/pybullet/table/primitive_actions.py",
"snippet": "class PrimitiveAction:\nclass PickAction(PrimitiveAction):\nclass PlaceAction(PrimitiveAction):\nclass PullAction(PrimitiveAction):\nclass PushAction(PrimitiveAction):\n RANG... | import dataclasses
import random
import numpy as np
import pybullet as p
import symbolic
from typing import Optional, Dict, List, Sequence, Tuple, Type
from ctrlutils import eigen
from shapely.geometry import Polygon, LineString
from generative_skill_chaining.envs.pybullet.table import primitive_actions, utils
from generative_skill_chaining.envs.pybullet.table.objects import Box, Hook, Null, Object, Rack
from generative_skill_chaining.envs.pybullet.sim import math
from generative_skill_chaining.envs.pybullet.sim.robot import Robot | 9,389 |
# Debug printer: a no-op by default. Swap in ``print`` to enable debug output.
# (PEP 8 E731: use a def rather than assigning a lambda to a name.)
def dbprint(*args) -> None:  # noqa
    """No-op debug logger; replace with ``print`` to trace execution."""
    return None
# dbprint = print
@dataclasses.dataclass
class Predicate:
    """Base class for symbolic predicates over named scene objects.

    A predicate is identified by its lowercased class name and carries the
    names of the objects it relates (``args``).
    """

    args: List[str]

    @classmethod
    def create(cls, proposition: str) -> "Predicate":
        """Instantiate the predicate subclass named in *proposition*.

        The subclass is looked up by lowercased class name among this
        module's globals.
        """
        name, arg_names = symbolic.parse_proposition(proposition)
        registry = {key.lower(): value for key, value in globals().items()}
        return registry[name](arg_names)

    def sample(
        self, robot: Robot, objects: Dict[str, Object], state: Sequence["Predicate"]
    ) -> bool:
        """Generates a geometric grounding of a predicate."""
        return True

    def value(
        self, robot: Robot, objects: Dict[str, Object], state: Sequence["Predicate"]
    ) -> bool:
        """Evaluates to True if the geometrically grounded predicate is satisfied."""
        return True

    def get_arg_objects(self, objects: Dict[str, Object]) -> List[Object]:
        """Resolve this predicate's argument names to their Object instances."""
        return [objects[name] for name in self.args]

    def __str__(self) -> str:
        rendered_args = ", ".join(self.args)
        return f"{type(self).__name__.lower()}({rendered_args})"

    def __hash__(self) -> int:
        # Hash the canonical string form so equal predicates hash alike.
        return hash(str(self))

    def __eq__(self, other) -> bool:
        return str(self) == str(other)
class HandleGrasp(Predicate):
    """Unary predicate enforcing a handle grasp towards the tail end on a hook object."""

    # Marker predicate: inherits all sampling/evaluation behavior from Predicate.
    pass
class UpperHandleGrasp(Predicate):
    """Unary predicate enforcing a handle grasp towards the head on a hook object."""

    # Marker predicate: inherits all sampling/evaluation behavior from Predicate.
    pass
class Free(Predicate):
"""Unary predicate enforcing that no top-down occlusions exist on the object."""
DISTANCE_MIN: Dict[Tuple[Type[Object], Type[Object]], float] = {
(Box, Box): 0.05,
(Box, Hook): 0.05,
|
# Debug printer: a no-op by default. Swap in ``print`` to enable debug output.
# (PEP 8 E731: use a def rather than assigning a lambda to a name.)
def dbprint(*args) -> None:  # noqa
    """No-op debug logger; replace with ``print`` to trace execution."""
    return None
# dbprint = print
@dataclasses.dataclass
class Predicate:
    """Base class for symbolic predicates over named scene objects.

    A predicate is identified by its lowercased class name and carries the
    names of the objects it relates (``args``).
    """

    args: List[str]

    @classmethod
    def create(cls, proposition: str) -> "Predicate":
        """Instantiate the predicate subclass named in *proposition*.

        The subclass is looked up by lowercased class name among this
        module's globals.
        """
        name, arg_names = symbolic.parse_proposition(proposition)
        registry = {key.lower(): value for key, value in globals().items()}
        return registry[name](arg_names)

    def sample(
        self, robot: Robot, objects: Dict[str, Object], state: Sequence["Predicate"]
    ) -> bool:
        """Generates a geometric grounding of a predicate."""
        return True

    def value(
        self, robot: Robot, objects: Dict[str, Object], state: Sequence["Predicate"]
    ) -> bool:
        """Evaluates to True if the geometrically grounded predicate is satisfied."""
        return True

    def get_arg_objects(self, objects: Dict[str, Object]) -> List[Object]:
        """Resolve this predicate's argument names to their Object instances."""
        return [objects[name] for name in self.args]

    def __str__(self) -> str:
        rendered_args = ", ".join(self.args)
        return f"{type(self).__name__.lower()}({rendered_args})"

    def __hash__(self) -> int:
        # Hash the canonical string form so equal predicates hash alike.
        return hash(str(self))

    def __eq__(self, other) -> bool:
        return str(self) == str(other)
class HandleGrasp(Predicate):
    """Unary predicate enforcing a handle grasp towards the tail end on a hook object."""

    # Marker predicate: inherits all sampling/evaluation behavior from Predicate.
    pass
class UpperHandleGrasp(Predicate):
    """Unary predicate enforcing a handle grasp towards the head on a hook object."""

    # Marker predicate: inherits all sampling/evaluation behavior from Predicate.
    pass
class Free(Predicate):
"""Unary predicate enforcing that no top-down occlusions exist on the object."""
DISTANCE_MIN: Dict[Tuple[Type[Object], Type[Object]], float] = {
(Box, Box): 0.05,
(Box, Hook): 0.05, | (Box, Rack): 0.1, | 6 | 2023-10-16 00:22:40+00:00 | 12k |
akashgreninja/GreSec | backend/venv/lib/python3.10/site-packages/pydantic/json_schema.py | [
{
"identifier": "_config",
"path": "backend/venv/lib/python3.10/site-packages/pydantic/_internal/_config.py",
"snippet": "DEPRECATION_MESSAGE = 'Support for class-based `config` is deprecated, use ConfigDict instead.'\nV2_REMOVED_KEYS = {\n 'allow_mutation',\n 'error_msg_templates',\n 'fields',... | import dataclasses
import inspect
import math
import re
import warnings
import pydantic_core
from collections import defaultdict
from copy import deepcopy
from dataclasses import is_dataclass
from enum import Enum
from typing import (
TYPE_CHECKING,
Any,
Callable,
Counter,
Dict,
Hashable,
Iterable,
List,
NewType,
Sequence,
Tuple,
TypeVar,
Union,
cast,
)
from pydantic_core import CoreSchema, PydanticOmit, core_schema, to_jsonable_python
from pydantic_core.core_schema import ComputedField
from typing_extensions import Annotated, Literal, assert_never
from ._internal import (
_config,
_core_metadata,
_core_utils,
_decorators,
_internal_dataclass,
_mock_val_ser,
_schema_generation_shared,
_typing_extra,
)
from .annotated_handlers import GetJsonSchemaHandler
from .config import JsonSchemaExtraCallable
from .errors import PydanticInvalidForJsonSchema, PydanticUserError
from . import ConfigDict
from ._internal._core_utils import CoreSchemaField, CoreSchemaOrField
from ._internal._dataclasses import PydanticDataclass
from ._internal._schema_generation_shared import GetJsonSchemaFunction
from .main import BaseModel | 7,407 | field_json_schema = self.generate_inner(field).copy()
except PydanticOmit:
continue
if 'title' not in field_json_schema and self.field_title_should_be_set(field):
title = self.get_title_from_name(name)
field_json_schema['title'] = title
field_json_schema = self.handle_ref_overrides(field_json_schema)
properties[name] = field_json_schema
if required:
required_fields.append(name)
json_schema = {'type': 'object', 'properties': properties}
if required_fields:
json_schema['required'] = required_fields
return json_schema
def _get_alias_name(self, field: CoreSchemaField, name: str) -> str:
if field['type'] == 'computed-field':
alias: Any = field.get('alias', name)
elif self.mode == 'validation':
alias = field.get('validation_alias', name)
else:
alias = field.get('serialization_alias', name)
if isinstance(alias, str):
name = alias
elif isinstance(alias, list):
alias = cast('list[str] | str', alias)
for path in alias:
if isinstance(path, list) and len(path) == 1 and isinstance(path[0], str):
# Use the first valid single-item string path; the code that constructs the alias array
# should ensure the first such item is what belongs in the JSON schema
name = path[0]
break
else:
assert_never(alias)
return name
def typed_dict_field_schema(self, schema: core_schema.TypedDictField) -> JsonSchemaValue:
    """Generates a JSON schema that matches a schema that defines a typed dict field.

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    # A typed-dict field is a thin wrapper; delegate to its inner schema.
    inner_schema = schema['schema']
    return self.generate_inner(inner_schema)
def dataclass_field_schema(self, schema: core_schema.DataclassField) -> JsonSchemaValue:
    """Generates a JSON schema that matches a schema that defines a dataclass field.

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    # A dataclass field is a thin wrapper; delegate to its inner schema.
    inner_schema = schema['schema']
    return self.generate_inner(inner_schema)
def model_field_schema(self, schema: core_schema.ModelField) -> JsonSchemaValue:
    """Generates a JSON schema that matches a schema that defines a model field.

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    # A model field is a thin wrapper; delegate to its inner schema.
    inner_schema = schema['schema']
    return self.generate_inner(inner_schema)
def computed_field_schema(self, schema: core_schema.ComputedField) -> JsonSchemaValue:
    """Generates a JSON schema that matches a schema that defines a computed field.

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    # Computed fields only exist on serialization; their schema is the return schema.
    return_schema = schema['return_schema']
    return self.generate_inner(return_schema)
def model_schema(self, schema: core_schema.ModelSchema) -> JsonSchemaValue:
    """Generates a JSON schema that matches a schema that defines a model.

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    # We do not use schema['model'].model_json_schema() here
    # because it could lead to inconsistent refs handling, etc.
    model_cls = cast('type[BaseModel]', schema['cls'])
    model_config = model_cls.model_config
    title = model_config.get('title')

    # Generate the inner schema while this model's config is active, so that
    # nested generation sees the right settings.
    with self._config_wrapper_stack.push(model_config):
        json_schema = self.generate_inner(schema['schema'])

    extra_source = model_config.get('json_schema_extra')
    if model_cls.__pydantic_root_model__:
        root_extra = model_cls.model_fields['root'].json_schema_extra
        if extra_source and root_extra:
            raise ValueError(
                '"model_config[\'json_schema_extra\']" and "Field.json_schema_extra" on "RootModel.root"'
                ' field must not be set simultaneously'
            )
        if root_extra:
            extra_source = root_extra

    return self._update_class_schema(
        json_schema, title, model_config.get('extra', None), model_cls, extra_source
    )
def _update_class_schema(
self,
json_schema: JsonSchemaValue,
title: str | None,
extra: Literal['allow', 'ignore', 'forbid'] | None,
cls: type[Any],
| """
The `json_schema` module contains classes and functions to allow the way [JSON Schema](https://json-schema.org/)
is generated to be customized.
In general you shouldn't need to use this module directly; instead, you can
[`BaseModel.model_json_schema`][pydantic.BaseModel.model_json_schema] and
[`TypeAdapter.json_schema`][pydantic.TypeAdapter.json_schema].
"""
from __future__ import annotations as _annotations
if TYPE_CHECKING:
CoreSchemaOrFieldType = Literal[core_schema.CoreSchemaType, core_schema.CoreSchemaFieldType]
"""
A type alias for defined schema types that represents a union of
`core_schema.CoreSchemaType` and
`core_schema.CoreSchemaFieldType`.
"""
JsonSchemaValue = Dict[str, Any]
"""
A type alias for a JSON schema value. This is a dictionary of string keys to arbitrary values.
"""
JsonSchemaMode = Literal['validation', 'serialization']
"""
A type alias that represents the mode of a JSON schema; either 'validation' or 'serialization'.
For some types, the inputs to validation differ from the outputs of serialization. For example,
computed fields will only be present when serializing, and should not be provided when
validating. This flag provides a way to indicate whether you want the JSON schema required
for validation inputs, or that will be matched by serialization outputs.
"""
_MODE_TITLE_MAPPING: dict[JsonSchemaMode, str] = {'validation': 'Input', 'serialization': 'Output'}
def update_json_schema(schema: JsonSchemaValue, updates: dict[str, Any]) -> JsonSchemaValue:
    """Update a JSON schema in place with *updates* and return it.

    Args:
        schema: The JSON schema to update.
        updates: A dictionary of key-value pairs to set in the schema.

    Returns:
        The same *schema* object, after mutation.
    """
    for key, value in updates.items():
        schema[key] = value
    return schema
# Warning-category keys; kinds listed in `GenerateJsonSchema.ignored_warning_kinds`
# are silently dropped by the generator.
JsonSchemaWarningKind = Literal['skipped-choice', 'non-serializable-default']
"""
A type alias representing the kinds of warnings that can be emitted during JSON schema generation.
See [`GenerateJsonSchema.render_warning_message`][pydantic.json_schema.GenerateJsonSchema.render_warning_message]
for more details.
"""
# Subclass of UserWarning so callers can filter these with `warnings.filterwarnings`.
class PydanticJsonSchemaWarning(UserWarning):
    """This class is used to emit warnings produced during JSON schema generation.

    See the [`GenerateJsonSchema.emit_warning`][pydantic.json_schema.GenerateJsonSchema.emit_warning] and
    [`GenerateJsonSchema.render_warning_message`][pydantic.json_schema.GenerateJsonSchema.render_warning_message]
    methods for more details; these can be overridden to control warning behavior.
    """
# ##### JSON Schema Generation #####
DEFAULT_REF_TEMPLATE = '#/$defs/{model}'
"""The default format string used to generate reference names."""

# There are three types of references relevant to building JSON schemas:
#   1. core_schema "ref" values; these are not exposed as part of the JSON schema
#       * these might look like the fully qualified path of a model, its id, or something similar
CoreRef = NewType('CoreRef', str)
#   2. keys of the "definitions" object that will eventually go into the JSON schema
#       * by default, these look like "MyModel", though may change in the presence of collisions
#       * eventually, we may want to make it easier to modify the way these names are generated
DefsRef = NewType('DefsRef', str)
#   3. the values corresponding to the "$ref" key in the schema
#       * By default, these look like "#/$defs/MyModel", as in {"$ref": "#/$defs/MyModel"}
JsonRef = NewType('JsonRef', str)

# A core ref paired with the mode ('validation'/'serialization') it was generated under.
CoreModeRef = Tuple[CoreRef, JsonSchemaMode]
# Key type callers use to identify schemas in `generate_definitions` inputs/outputs.
JsonSchemaKeyT = TypeVar('JsonSchemaKeyT', bound=Hashable)
@dataclasses.dataclass(**_internal_dataclass.slots_true)
class _DefinitionsRemapping:
    # Remapping for definition keys (e.g. a collision-avoiding name -> a simpler equivalent).
    defs_remapping: dict[DefsRef, DefsRef]
    # Parallel remapping for the "$ref" values that point at those definitions.
    json_remapping: dict[JsonRef, JsonRef]

    @staticmethod
    def from_prioritized_choices(
        prioritized_choices: dict[DefsRef, list[DefsRef]],
        defs_to_json: dict[DefsRef, JsonRef],
        definitions: dict[DefsRef, JsonSchemaValue],
    ) -> _DefinitionsRemapping:
        """
        This function should produce a remapping that replaces complex DefsRef with the simpler ones from the
        prioritized_choices such that applying the name remapping would result in an equivalent JSON schema.
        """
        # We need to iteratively simplify the definitions until we reach a fixed point.
        # The reason for this is that outer definitions may reference inner definitions that get simplified
        # into an equivalent reference, and the outer definitions won't be equivalent until we've simplified
        # the inner definitions.
        copied_definitions = deepcopy(definitions)
        definitions_schema = {'$defs': copied_definitions}
        for _iter in range(100):  # prevent an infinite loop in the case of a bug, 100 iterations should be enough
            # For every possible remapped DefsRef, collect all schemas that that DefsRef might be used for:
            schemas_for_alternatives: dict[DefsRef, list[JsonSchemaValue]] = defaultdict(list)
            for defs_ref in copied_definitions:
                alternatives = prioritized_choices[defs_ref]
                for alternative in alternatives:
                    schemas_for_alternatives[alternative].append(copied_definitions[defs_ref])

            # Deduplicate the schemas for each alternative; the idea is that we only want to remap to a new DefsRef
            # if it introduces no ambiguity, i.e., there is only one distinct schema for that DefsRef.
            for defs_ref, schemas in schemas_for_alternatives.items():
                schemas_for_alternatives[defs_ref] = _deduplicate_schemas(schemas_for_alternatives[defs_ref])

            # Build the remapping
            defs_remapping: dict[DefsRef, DefsRef] = {}
            json_remapping: dict[JsonRef, JsonRef] = {}
            for original_defs_ref in definitions:
                alternatives = prioritized_choices[original_defs_ref]
                # Pick the first alternative that has only one schema, since that means there is no collision
                remapped_defs_ref = next(x for x in alternatives if len(schemas_for_alternatives[x]) == 1)
                defs_remapping[original_defs_ref] = remapped_defs_ref
                json_remapping[defs_to_json[original_defs_ref]] = defs_to_json[remapped_defs_ref]
            remapping = _DefinitionsRemapping(defs_remapping, json_remapping)
            new_definitions_schema = remapping.remap_json_schema({'$defs': copied_definitions})
            if definitions_schema == new_definitions_schema:
                # We've reached the fixed point
                return remapping
            definitions_schema = new_definitions_schema

        raise PydanticInvalidForJsonSchema('Failed to simplify the JSON schema definitions')

    def remap_defs_ref(self, ref: DefsRef) -> DefsRef:
        # Identity when no remapping was recorded for `ref`.
        return self.defs_remapping.get(ref, ref)

    def remap_json_ref(self, ref: JsonRef) -> JsonRef:
        # Identity when no remapping was recorded for `ref`.
        return self.json_remapping.get(ref, ref)

    def remap_json_schema(self, schema: Any) -> Any:
        """
        Recursively update the JSON schema replacing all $refs
        """
        if isinstance(schema, str):
            # Note: this may not really be a JsonRef; we rely on having no collisions between JsonRefs and other strings
            return self.remap_json_ref(JsonRef(schema))
        elif isinstance(schema, list):
            return [self.remap_json_schema(item) for item in schema]
        elif isinstance(schema, dict):
            # Mutates dicts in place; '$ref' values and '$defs' keys are remapped,
            # everything else is recursed into.
            for key, value in schema.items():
                if key == '$ref' and isinstance(value, str):
                    schema['$ref'] = self.remap_json_ref(JsonRef(value))
                elif key == '$defs':
                    schema['$defs'] = {
                        self.remap_defs_ref(DefsRef(key)): self.remap_json_schema(value)
                        for key, value in schema['$defs'].items()
                    }
                else:
                    schema[key] = self.remap_json_schema(value)
        return schema
class GenerateJsonSchema:
"""A class for generating JSON schemas.
This class generates JSON schemas based on configured parameters. The default schema dialect
is [https://json-schema.org/draft/2020-12/schema](https://json-schema.org/draft/2020-12/schema).
The class uses `by_alias` to configure how fields with
multiple names are handled and `ref_template` to format reference names.
Attributes:
schema_dialect: The JSON schema dialect used to generate the schema. See
[Declaring a Dialect](https://json-schema.org/understanding-json-schema/reference/schema.html#id4)
in the JSON Schema documentation for more information about dialects.
ignored_warning_kinds: Warnings to ignore when generating the schema. `self.render_warning_message` will
do nothing if its argument `kind` is in `ignored_warning_kinds`;
this value can be modified on subclasses to easily control which warnings are emitted.
by_alias: Whether or not to use field names when generating the schema.
ref_template: The format string used when generating reference names.
core_to_json_refs: A mapping of core refs to JSON refs.
core_to_defs_refs: A mapping of core refs to definition refs.
defs_to_core_refs: A mapping of definition refs to core refs.
json_to_defs_refs: A mapping of JSON refs to definition refs.
definitions: Definitions in the schema.
collisions: Definitions with colliding names. When collisions are detected, we choose a non-colliding
name during generation, but we also track the colliding tag so that it can be remapped for the first
occurrence at the end of the process.
defs_ref_fallbacks: Core refs to fallback definitions refs.
_schema_type_to_method: A mapping of schema types to generator methods.
_used: Set to `True` after generating a schema to avoid re-use issues.
mode: The schema mode.
Args:
by_alias: Whether or not to include field names.
ref_template: The format string to use when generating reference names.
Raises:
JsonSchemaError: If the instance of the class is inadvertently re-used after generating a schema.
"""
schema_dialect = 'https://json-schema.org/draft/2020-12/schema'
# `self.render_warning_message` will do nothing if its argument `kind` is in `ignored_warning_kinds`;
# this value can be modified on subclasses to easily control which warnings are emitted
ignored_warning_kinds: set[JsonSchemaWarningKind] = {'skipped-choice'}
def __init__(self, by_alias: bool = True, ref_template: str = DEFAULT_REF_TEMPLATE):
    """Initialize empty ref-tracking state; see the class docstring for attribute semantics."""
    self.by_alias = by_alias
    self.ref_template = ref_template

    # Bidirectional bookkeeping between core refs (+ mode), definition keys,
    # and JSON '$ref' strings, plus the accumulated definitions themselves.
    self.core_to_json_refs: dict[CoreModeRef, JsonRef] = {}
    self.core_to_defs_refs: dict[CoreModeRef, DefsRef] = {}
    self.defs_to_core_refs: dict[DefsRef, CoreModeRef] = {}
    self.json_to_defs_refs: dict[JsonRef, DefsRef] = {}

    self.definitions: dict[DefsRef, JsonSchemaValue] = {}
    self._config_wrapper_stack = _config.ConfigWrapperStack(_config.ConfigWrapper({}))

    self._mode: JsonSchemaMode = 'validation'

    # The following includes a mapping of a fully-unique defs ref choice to a list of preferred
    # alternatives, which are generally simpler, such as only including the class name.
    # At the end of schema generation, we use these to produce a JSON schema with more human-readable
    # definitions, which would also work better in a generated OpenAPI client, etc.
    self._prioritized_defsref_choices: dict[DefsRef, list[DefsRef]] = {}
    self._collision_counter: dict[str, int] = defaultdict(int)
    self._collision_index: dict[str, int] = {}

    self._schema_type_to_method = self.build_schema_type_to_method()

    # When we encounter definitions we need to try to build them immediately
    # so that they are available schemas that reference them
    # But it's possible that CoreSchema was never going to be used
    # (e.g. because the CoreSchema that references short circuits is JSON schema generation without needing
    # the reference) so instead of failing altogether if we can't build a definition we
    # store the error raised and re-throw it if we end up needing that def
    self._core_defs_invalid_for_json_schema: dict[DefsRef, PydanticInvalidForJsonSchema] = {}

    # This changes to True after generating a schema, to prevent issues caused by accidental re-use
    # of a single instance of a schema generator
    self._used = False
@property
def _config(self) -> _config.ConfigWrapper:
    # The config wrapper currently in effect: the most recently pushed stack entry.
    return self._config_wrapper_stack.tail
@property
def mode(self) -> JsonSchemaMode:
    """The effective schema mode: a config-level override wins over `self._mode`."""
    override = self._config.json_schema_mode_override
    return self._mode if override is None else override
def build_schema_type_to_method(
    self,
) -> dict[CoreSchemaOrFieldType, Callable[[CoreSchemaOrField], JsonSchemaValue]]:
    """Builds a dictionary mapping fields to methods for generating JSON schemas.

    Returns:
        A dictionary containing the mapping of `CoreSchemaOrFieldType` to a handler method.

    Raises:
        TypeError: If no method has been defined for generating a JSON schema for a given pydantic core schema type.
    """
    handlers: dict[CoreSchemaOrFieldType, Callable[[CoreSchemaOrField], JsonSchemaValue]] = {}
    schema_types: list[CoreSchemaOrFieldType] = _typing_extra.all_literal_values(
        CoreSchemaOrFieldType  # type: ignore
    )
    for schema_type in schema_types:
        # Handler methods follow the `<type>_schema` naming convention, with
        # '-' replaced by '_' (e.g. 'typed-dict-field' -> typed_dict_field_schema).
        method_name = f"{schema_type.replace('-', '_')}_schema"
        try:
            handlers[schema_type] = getattr(self, method_name)
        except AttributeError as e:  # pragma: no cover
            raise TypeError(
                f'No method for generating JsonSchema for core_schema.type={schema_type!r} '
                f'(expected: {type(self).__name__}.{method_name})'
            ) from e
    return handlers
def generate_definitions(
    self, inputs: Sequence[tuple[JsonSchemaKeyT, JsonSchemaMode, core_schema.CoreSchema]]
) -> tuple[dict[tuple[JsonSchemaKeyT, JsonSchemaMode], JsonSchemaValue], dict[DefsRef, JsonSchemaValue]]:
    """Generates JSON schema definitions from a list of core schemas, pairing the generated definitions with a
    mapping that links the input keys to the definition references.

    Args:
        inputs: A sequence of tuples, where:

            - The first element is a JSON schema key type.
            - The second element is the JSON mode: either 'validation' or 'serialization'.
            - The third element is a core schema.

    Returns:
        A tuple where:

            - The first element is a dictionary whose keys are tuples of JSON schema key type and JSON mode, and
                whose values are the JSON schema corresponding to that pair of inputs. (These schemas may have
                JsonRef references to definitions that are defined in the second returned element.)
            - The second element is a dictionary whose keys are definition references for the JSON schemas
                from the first returned element, and whose values are the actual JSON schema definitions.

    Raises:
        PydanticUserError: Raised if the JSON schema generator has already been used to generate a JSON schema.
    """
    if self._used:
        raise PydanticUserError(
            'This JSON schema generator has already been used to generate a JSON schema. '
            f'You must create a new instance of {type(self).__name__} to generate a new JSON schema.',
            code='json-schema-already-used',
        )

    # First pass: generate (and discard) every schema so that self.definitions
    # is fully populated before any remapping is computed.
    for key, mode, schema in inputs:
        self._mode = mode
        self.generate_inner(schema)

    definitions_remapping = self._build_definitions_remapping()

    # Second pass: regenerate each schema and apply the simplified ref names.
    json_schemas_map: dict[tuple[JsonSchemaKeyT, JsonSchemaMode], DefsRef] = {}
    for key, mode, schema in inputs:
        self._mode = mode
        json_schema = self.generate_inner(schema)
        json_schemas_map[(key, mode)] = definitions_remapping.remap_json_schema(json_schema)

    json_schema = {'$defs': self.definitions}
    json_schema = definitions_remapping.remap_json_schema(json_schema)

    # Mark this generator as spent; re-use would mix state across runs.
    self._used = True
    return json_schemas_map, _sort_json_schema(json_schema['$defs'])  # type: ignore
def generate(self, schema: CoreSchema, mode: JsonSchemaMode = 'validation') -> JsonSchemaValue:
    """Generates a JSON schema for a specified schema in a specified mode.

    Args:
        schema: A Pydantic model.
        mode: The mode in which to generate the schema. Defaults to 'validation'.

    Returns:
        A JSON schema representing the specified schema.

    Raises:
        PydanticUserError: If the JSON schema generator has already been used to generate a JSON schema.
    """
    self._mode = mode
    if self._used:
        # Instances are single-use: definitions/refs accumulated by a previous run would
        # leak into (and corrupt) a second generated schema.
        raise PydanticUserError(
            'This JSON schema generator has already been used to generate a JSON schema. '
            f'You must create a new instance of {type(self).__name__} to generate a new JSON schema.',
            code='json-schema-already-used',
        )
    json_schema: JsonSchemaValue = self.generate_inner(schema)
    json_ref_counts = self.get_json_ref_counts(json_schema)
    # Remove the top-level $ref if present; note that the _generate method already ensures there are no sibling keys
    ref = cast(JsonRef, json_schema.get('$ref'))
    while ref is not None:  # may need to unpack multiple levels
        ref_json_schema = self.get_schema_from_definitions(ref)
        if json_ref_counts[ref] > 1 or ref_json_schema is None:
            # Keep the ref, but use an allOf to remove the top level $ref
            json_schema = {'allOf': [{'$ref': ref}]}
        else:
            # "Unpack" the ref since this is the only reference
            json_schema = ref_json_schema.copy()  # copy to prevent recursive dict reference
            json_ref_counts[ref] -= 1
        ref = cast(JsonRef, json_schema.get('$ref'))
    # Drop definitions that are no longer referenced after the unpacking above,
    # then shorten/normalize the remaining defs refs.
    self._garbage_collect_definitions(json_schema)
    definitions_remapping = self._build_definitions_remapping()
    if self.definitions:
        json_schema['$defs'] = self.definitions
    json_schema = definitions_remapping.remap_json_schema(json_schema)
    # For now, we will not set the $schema key. However, if desired, this can be easily added by overriding
    # this method and adding the following line after a call to super().generate(schema):
    # json_schema['$schema'] = self.schema_dialect
    self._used = True
    return _sort_json_schema(json_schema)
def generate_inner(self, schema: CoreSchemaOrField) -> JsonSchemaValue:  # noqa: C901
    """Generates a JSON schema for a given core schema.

    Args:
        schema: The given core schema.

    Returns:
        The generated JSON schema.
    """
    # If a schema with the same CoreRef has been handled, just return a reference to it
    # Note that this assumes that it will _never_ be the case that the same CoreRef is used
    # on types that should have different JSON schemas
    if 'ref' in schema:
        core_ref = CoreRef(schema['ref'])  # type: ignore[typeddict-item]
        core_mode_ref = (core_ref, self.mode)
        if core_mode_ref in self.core_to_defs_refs and self.core_to_defs_refs[core_mode_ref] in self.definitions:
            return {'$ref': self.core_to_json_refs[core_mode_ref]}

    # Generate the JSON schema, accounting for the json_schema_override and core_schema_override
    metadata_handler = _core_metadata.CoreMetadataHandler(schema)

    def populate_defs(core_schema: CoreSchema, json_schema: JsonSchemaValue) -> JsonSchemaValue:
        # If the core schema carries a 'ref', store the generated schema under its defs ref
        # and return a '$ref' stub pointing at it.
        if 'ref' in core_schema:
            core_ref = CoreRef(core_schema['ref'])  # type: ignore[typeddict-item]
            defs_ref, ref_json_schema = self.get_cache_defs_ref_schema(core_ref)
            json_ref = JsonRef(ref_json_schema['$ref'])
            self.json_to_defs_refs[json_ref] = defs_ref
            # Replace the schema if it's not a reference to itself
            # What we want to avoid is having the def be just a ref to itself
            # which is what would happen if we blindly assigned any
            if json_schema.get('$ref', None) != json_ref:
                self.definitions[defs_ref] = json_schema
                self._core_defs_invalid_for_json_schema.pop(defs_ref, None)
            json_schema = ref_json_schema
        return json_schema

    def convert_to_all_of(json_schema: JsonSchemaValue) -> JsonSchemaValue:
        if '$ref' in json_schema and len(json_schema.keys()) > 1:
            # technically you can't have any other keys next to a "$ref"
            # but it's an easy mistake to make and not hard to correct automatically here
            json_schema = json_schema.copy()
            ref = json_schema.pop('$ref')
            json_schema = {'allOf': [{'$ref': ref}], **json_schema}
        return json_schema

    def handler_func(schema_or_field: CoreSchemaOrField) -> JsonSchemaValue:
        """Generate a JSON schema based on the input schema.

        Args:
            schema_or_field: The core schema to generate a JSON schema from.

        Returns:
            The generated JSON schema.

        Raises:
            TypeError: If an unexpected schema type is encountered.
        """
        # Generate the core-schema-type-specific bits of the schema generation:
        json_schema: JsonSchemaValue | None = None
        if self.mode == 'serialization' and 'serialization' in schema_or_field:
            ser_schema = schema_or_field['serialization']  # type: ignore
            json_schema = self.ser_schema(ser_schema)
        if json_schema is None:
            if _core_utils.is_core_schema(schema_or_field) or _core_utils.is_core_schema_field(schema_or_field):
                # Dispatch to the method registered for this core schema type (e.g. 'int' -> int_schema).
                generate_for_schema_type = self._schema_type_to_method[schema_or_field['type']]
                json_schema = generate_for_schema_type(schema_or_field)
            else:
                raise TypeError(f'Unexpected schema type: schema={schema_or_field}')
        if _core_utils.is_core_schema(schema_or_field):
            json_schema = populate_defs(schema_or_field, json_schema)
            json_schema = convert_to_all_of(json_schema)
        return json_schema

    current_handler = _schema_generation_shared.GenerateJsonSchemaHandler(self, handler_func)

    # Wrap the base handler with each registered 'pydantic_js_functions' modifier.
    # NOTE: current_handler/js_modify_function are bound as parameter defaults to avoid
    # Python's late-binding-closure pitfall inside this loop.
    for js_modify_function in metadata_handler.metadata.get('pydantic_js_functions', ()):

        def new_handler_func(
            schema_or_field: CoreSchemaOrField,
            current_handler: GetJsonSchemaHandler = current_handler,
            js_modify_function: GetJsonSchemaFunction = js_modify_function,
        ) -> JsonSchemaValue:
            json_schema = js_modify_function(schema_or_field, current_handler)
            if _core_utils.is_core_schema(schema_or_field):
                json_schema = populate_defs(schema_or_field, json_schema)
            # Merge any sibling keys the modifier added next to a '$ref' back into the
            # referenced definition itself.
            original_schema = current_handler.resolve_ref_schema(json_schema)
            ref = json_schema.pop('$ref', None)
            if ref and json_schema:
                original_schema.update(json_schema)
            return original_schema

        current_handler = _schema_generation_shared.GenerateJsonSchemaHandler(self, new_handler_func)

    # Annotation-sourced modifiers wrap outside the ones above (same default-binding trick).
    for js_modify_function in metadata_handler.metadata.get('pydantic_js_annotation_functions', ()):

        def new_handler_func(
            schema_or_field: CoreSchemaOrField,
            current_handler: GetJsonSchemaHandler = current_handler,
            js_modify_function: GetJsonSchemaFunction = js_modify_function,
        ) -> JsonSchemaValue:
            json_schema = js_modify_function(schema_or_field, current_handler)
            if _core_utils.is_core_schema(schema_or_field):
                json_schema = populate_defs(schema_or_field, json_schema)
                json_schema = convert_to_all_of(json_schema)
            return json_schema

        current_handler = _schema_generation_shared.GenerateJsonSchemaHandler(self, new_handler_func)

    json_schema = current_handler(schema)
    if _core_utils.is_core_schema(schema):
        json_schema = populate_defs(schema, json_schema)
        json_schema = convert_to_all_of(json_schema)
    return json_schema
# ### Schema generation methods
def any_schema(self, schema: core_schema.AnySchema) -> JsonSchemaValue:
    """Return the empty JSON schema, which places no constraints on a value.

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    # An empty schema accepts anything, matching `Any` semantics.
    return {}
def none_schema(self, schema: core_schema.NoneSchema) -> JsonSchemaValue:
    """Return the JSON schema accepting only ``null``.

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    return dict(type='null')
def bool_schema(self, schema: core_schema.BoolSchema) -> JsonSchemaValue:
    """Return the JSON schema accepting a boolean value.

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    return dict(type='boolean')
def int_schema(self, schema: core_schema.IntSchema) -> JsonSchemaValue:
    """Build the JSON schema for an integer, applying any numeric constraints.

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    json_schema: dict[str, Any] = {'type': 'integer'}
    self.update_with_validations(json_schema, schema, self.ValidationsMapping.numeric)
    # Infinite bounds cannot be represented in JSON; drop any such entries.
    return {key: value for key, value in json_schema.items() if value not in {math.inf, -math.inf}}
def float_schema(self, schema: core_schema.FloatSchema) -> JsonSchemaValue:
    """Build the JSON schema for a float, applying any numeric constraints.

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    json_schema: dict[str, Any] = {'type': 'number'}
    self.update_with_validations(json_schema, schema, self.ValidationsMapping.numeric)
    # Infinite bounds cannot be represented in JSON; drop any such entries.
    return {key: value for key, value in json_schema.items() if value not in {math.inf, -math.inf}}
def decimal_schema(self, schema: core_schema.DecimalSchema) -> JsonSchemaValue:
    """Build the JSON schema for a decimal value.

    Serialization emits a string; validation additionally accepts a number, so the
    validation schema is an ``anyOf`` of the float schema and the string schema.

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    json_schema = self.str_schema(core_schema.str_schema())
    if self.mode != 'validation':
        return json_schema

    def _bound_as_float(key: str):
        # Decimal bounds must be converted to float for the numeric branch.
        bound = schema.get(key)
        return None if bound is None else float(bound)

    numeric_schema = self.float_schema(
        core_schema.float_schema(
            allow_inf_nan=schema.get('allow_inf_nan'),
            multiple_of=_bound_as_float('multiple_of'),
            le=_bound_as_float('le'),
            ge=_bound_as_float('ge'),
            lt=_bound_as_float('lt'),
            gt=_bound_as_float('gt'),
        )
    )
    return {'anyOf': [numeric_schema, json_schema]}
def str_schema(self, schema: core_schema.StringSchema) -> JsonSchemaValue:
    """Build the JSON schema for a string, applying any string constraints.

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    result = {'type': 'string'}
    self.update_with_validations(result, schema, self.ValidationsMapping.string)
    return result
def bytes_schema(self, schema: core_schema.BytesSchema) -> JsonSchemaValue:
    """Build the JSON schema for a bytes value.

    The string format depends on how the config serializes bytes
    ('base64' -> 'base64url', otherwise 'binary').

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    fmt = 'base64url' if self._config.ser_json_bytes == 'base64' else 'binary'
    result = {'type': 'string', 'format': fmt}
    self.update_with_validations(result, schema, self.ValidationsMapping.bytes)
    return result
def date_schema(self, schema: core_schema.DateSchema) -> JsonSchemaValue:
    """Build the JSON schema for a date, applying any date constraints.

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    result = {'type': 'string', 'format': 'date'}
    self.update_with_validations(result, schema, self.ValidationsMapping.date)
    return result
def time_schema(self, schema: core_schema.TimeSchema) -> JsonSchemaValue:
    """Build the JSON schema for a time value.

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    return dict(type='string', format='time')
def datetime_schema(self, schema: core_schema.DatetimeSchema) -> JsonSchemaValue:
    """Build the JSON schema for a datetime value.

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    return dict(type='string', format='date-time')
def timedelta_schema(self, schema: core_schema.TimedeltaSchema) -> JsonSchemaValue:
    """Build the JSON schema for a timedelta value.

    A number when the config serializes timedeltas as floats, otherwise an
    ISO-8601 duration string.

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    if self._config.ser_json_timedelta != 'float':
        return {'type': 'string', 'format': 'duration'}
    return {'type': 'number'}
def literal_schema(self, schema: core_schema.LiteralSchema) -> JsonSchemaValue:
    """Build the JSON schema for a set of literal values.

    A single value becomes a ``const``; multiple values become an ``enum``, with a
    ``type`` added when all values share one JSON-typed Python type.

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    # Unwrap enum members, then jsonify every expected value.
    expected = [to_jsonable_python(v.value if isinstance(v, Enum) else v) for v in schema['expected']]
    if len(expected) == 1:
        return {'const': expected[0]}

    result: JsonSchemaValue = {'enum': expected}
    value_types = {type(e) for e in expected}
    json_type_for = {str: 'string', int: 'integer', float: 'number', bool: 'boolean', list: 'array'}
    # Mixed types (and e.g. a lone None) get no 'type'; a single Literal[None]
    # becomes the `const` schema above.
    if len(value_types) == 1:
        json_type = json_type_for.get(next(iter(value_types)))
        if json_type is not None:
            result['type'] = json_type
    return result
def is_instance_schema(self, schema: core_schema.IsInstanceSchema) -> JsonSchemaValue:
    """Handle a schema that checks a value with Python's `isinstance`.

    Such checks have no JSON-schema equivalent, so this defers to the invalid-schema
    handler (which may raise or emit a warning).

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    message = f'core_schema.IsInstanceSchema ({schema["cls"]})'
    return self.handle_invalid_for_json_schema(schema, message)
def is_subclass_schema(self, schema: core_schema.IsSubclassSchema) -> JsonSchemaValue:
    """Handle a schema that checks a value with Python's `issubclass`.

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    # Note: This is for compatibility with V1; you can override if you want different behavior.
    return {}
def callable_schema(self, schema: core_schema.CallableSchema) -> JsonSchemaValue:
    """Handle a schema that matches a callable value.

    Callables have no JSON-schema equivalent, so this defers to the invalid-schema
    handler (which may raise or emit a warning).

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    message = 'core_schema.CallableSchema'
    return self.handle_invalid_for_json_schema(schema, message)
def list_schema(self, schema: core_schema.ListSchema) -> JsonSchemaValue:
    """Build the JSON schema for a list.

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    if 'items_schema' in schema:
        items = self.generate_inner(schema['items_schema'])
    else:
        items = {}  # no item schema -> items accept anything
    result = {'type': 'array', 'items': items}
    self.update_with_validations(result, schema, self.ValidationsMapping.array)
    return result
def tuple_positional_schema(self, schema: core_schema.TuplePositionalSchema) -> JsonSchemaValue:
    """Build the JSON schema for a positional tuple, e.g. `Tuple[int, str, bool]`.

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    positional = schema['items_schema']
    result: JsonSchemaValue = {'type': 'array', 'minItems': len(positional)}
    prefix_items = [self.generate_inner(item) for item in positional]
    if prefix_items:
        result['prefixItems'] = prefix_items
    if 'extras_schema' in schema:
        # A trailing `*args`-style schema allows extra items beyond the fixed prefix.
        result['items'] = self.generate_inner(schema['extras_schema'])
    else:
        result['maxItems'] = len(positional)
    self.update_with_validations(result, schema, self.ValidationsMapping.array)
    return result
def tuple_variable_schema(self, schema: core_schema.TupleVariableSchema) -> JsonSchemaValue:
    """Build the JSON schema for a variable-length tuple, e.g. `Tuple[int, ...]`.

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    if 'items_schema' in schema:
        items = self.generate_inner(schema['items_schema'])
    else:
        items = {}  # no item schema -> items accept anything
    result = {'type': 'array', 'items': items}
    self.update_with_validations(result, schema, self.ValidationsMapping.array)
    return result
def set_schema(self, schema: core_schema.SetSchema) -> JsonSchemaValue:
    """Build the JSON schema for a set (shared with frozenset generation).

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    return self._common_set_schema(schema)
def frozenset_schema(self, schema: core_schema.FrozenSetSchema) -> JsonSchemaValue:
    """Build the JSON schema for a frozenset (shared with set generation).

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    return self._common_set_schema(schema)
def _common_set_schema(self, schema: core_schema.SetSchema | core_schema.FrozenSetSchema) -> JsonSchemaValue:
items_schema = {} if 'items_schema' not in schema else self.generate_inner(schema['items_schema'])
json_schema = {'type': 'array', 'uniqueItems': True, 'items': items_schema}
self.update_with_validations(json_schema, schema, self.ValidationsMapping.array)
return json_schema
def generator_schema(self, schema: core_schema.GeneratorSchema) -> JsonSchemaValue:
    """Build the JSON schema for a generator: represented as an array of its items.

    Args:
        schema: The schema.

    Returns:
        The generated JSON schema.
    """
    if 'items_schema' in schema:
        items = self.generate_inner(schema['items_schema'])
    else:
        items = {}  # no item schema -> items accept anything
    result = {'type': 'array', 'items': items}
    self.update_with_validations(result, schema, self.ValidationsMapping.array)
    return result
def dict_schema(self, schema: core_schema.DictSchema) -> JsonSchemaValue:
    """Build the JSON schema for a dict.

    A key pattern (from a constrained-string key schema) produces ``patternProperties``;
    otherwise a non-empty value schema produces ``additionalProperties``.

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    result: JsonSchemaValue = {'type': 'object'}

    keys_schema = self.generate_inner(schema['keys_schema']).copy() if 'keys_schema' in schema else {}
    keys_pattern = keys_schema.pop('pattern', None)

    values_schema = self.generate_inner(schema['values_schema']).copy() if 'values_schema' in schema else {}
    values_schema.pop('title', None)  # don't give a title to the additionalProperties

    if keys_pattern is not None:
        result['patternProperties'] = {keys_pattern: values_schema}
    elif values_schema:
        # omit additionalProperties entirely when the value schema is empty
        result['additionalProperties'] = values_schema

    self.update_with_validations(result, schema, self.ValidationsMapping.object)
    return result
def _function_schema(
    self,
    schema: _core_utils.AnyFunctionSchema,
) -> JsonSchemaValue:
    # Shared implementation for the function-* schema kinds.
    if not _core_utils.is_function_with_inner_schema(schema):
        # function-plain: there is no inner schema to derive a JSON schema from.
        return self.handle_invalid_for_json_schema(
            schema, f'core_schema.PlainValidatorFunctionSchema ({schema["function"]})'
        )
    # This could be wrong if the function's mode is 'before', but in practice will often be right, and when it
    # isn't, I think it would be hard to automatically infer what the desired schema should be.
    return self.generate_inner(schema['schema'])
def function_before_schema(self, schema: core_schema.BeforeValidatorFunctionSchema) -> JsonSchemaValue:
    """Build the JSON schema for a function-before schema via the shared function handler.

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    return self._function_schema(schema)
def function_after_schema(self, schema: core_schema.AfterValidatorFunctionSchema) -> JsonSchemaValue:
    """Build the JSON schema for a function-after schema via the shared function handler.

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    return self._function_schema(schema)
def function_plain_schema(self, schema: core_schema.PlainValidatorFunctionSchema) -> JsonSchemaValue:
    """Build the JSON schema for a function-plain schema via the shared function handler.

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    return self._function_schema(schema)
def function_wrap_schema(self, schema: core_schema.WrapValidatorFunctionSchema) -> JsonSchemaValue:
    """Build the JSON schema for a function-wrap schema via the shared function handler.

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    return self._function_schema(schema)
def default_schema(self, schema: core_schema.WithDefaultSchema) -> JsonSchemaValue:
    """Build the JSON schema for a schema carrying a default value.

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    json_schema = self.generate_inner(schema['schema'])

    if 'default' not in schema:
        return json_schema
    default = schema['default']
    # Note: if you want to include the value returned by the default_factory,
    # override this method and replace the code above with:
    # if 'default' in schema:
    #     default = schema['default']
    # elif 'default_factory' in schema:
    #     default = schema['default_factory']()
    # else:
    #     return json_schema

    try:
        encoded_default = self.encode_default(default)
    except pydantic_core.PydanticSerializationError:
        self.emit_warning(
            'non-serializable-default',
            f'Default value {default} is not JSON serializable; excluding default from JSON schema',
        )
        # Return the inner schema, as though there was no default
        return json_schema

    if '$ref' not in json_schema:
        json_schema['default'] = encoded_default
        return json_schema
    # Since reference schemas do not support child keys, we wrap the reference schema in a single-case allOf:
    return {'allOf': [json_schema], 'default': encoded_default}
def nullable_schema(self, schema: core_schema.NullableSchema) -> JsonSchemaValue:
    """Build the JSON schema for a schema that also allows ``null``.

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    null_schema = {'type': 'null'}
    inner = self.generate_inner(schema['schema'])
    if inner == null_schema:
        # Already null-only; no need to wrap in anyOf.
        return null_schema
    # Thanks to the equality check above, 'oneOf' would also be valid here;
    # 'anyOf' is used as it tends to work better with external tooling.
    return self.get_flattened_anyof([inner, null_schema])
def union_schema(self, schema: core_schema.UnionSchema) -> JsonSchemaValue:
    """Build the JSON schema for a union: any of the member schemas may match.

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    members: list[JsonSchemaValue] = []
    for choice in schema['choices']:
        # choice will be a tuple if an explicit label was provided
        choice_schema = choice[0] if isinstance(choice, tuple) else choice
        try:
            members.append(self.generate_inner(choice_schema))
        except PydanticOmit:
            continue
        except PydanticInvalidForJsonSchema as exc:
            self.emit_warning('skipped-choice', exc.message)
    if len(members) == 1:
        # A single surviving member needs no anyOf wrapper.
        return members[0]
    return self.get_flattened_anyof(members)
def tagged_union_schema(self, schema: core_schema.TaggedUnionSchema) -> JsonSchemaValue:
    """Generates a JSON schema that matches a schema that allows values matching any of the given schemas, where
    the schemas are tagged with a discriminator field that indicates which schema should be used to validate
    the value.

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    generated: dict[str, JsonSchemaValue] = {}
    for k, v in schema['choices'].items():
        if isinstance(k, Enum):
            k = k.value
        try:
            # Use str(k) since keys must be strings for json; while not technically correct,
            # it's the closest that can be represented in valid JSON
            generated[str(k)] = self.generate_inner(v).copy()
        except PydanticOmit:
            # A choice may opt out of the JSON schema entirely.
            continue
        except PydanticInvalidForJsonSchema as exc:
            self.emit_warning('skipped-choice', exc.message)
    # Deduplicate: distinct tags may map to the same member schema.
    one_of_choices = _deduplicate_schemas(generated.values())
    json_schema: JsonSchemaValue = {'oneOf': one_of_choices}

    # This reflects the v1 behavior; TODO: we should make it possible to exclude OpenAPI stuff from the JSON schema
    openapi_discriminator = self._extract_discriminator(schema, one_of_choices)
    if openapi_discriminator is not None:
        json_schema['discriminator'] = {
            'propertyName': openapi_discriminator,
            # Map each tag to the member's $ref when present, else to the inline schema.
            'mapping': {k: v.get('$ref', v) for k, v in generated.items()},
        }

    return json_schema
def _extract_discriminator(
    self, schema: core_schema.TaggedUnionSchema, one_of_choices: list[_JsonDict]
) -> str | None:
    """Extract a compatible OpenAPI discriminator from the schema and one_of choices that end up in the final
    schema.

    Returns the discriminator property name, or None if no single property is usable.
    """
    openapi_discriminator: str | None = None
    if isinstance(schema['discriminator'], str):
        # Simple case: the discriminator is already a single field name.
        return schema['discriminator']
    if isinstance(schema['discriminator'], list):
        # If the discriminator is a single item list containing a string, that is equivalent to the string case
        if len(schema['discriminator']) == 1 and isinstance(schema['discriminator'][0], str):
            return schema['discriminator'][0]
        # When an alias is used that is different from the field name, the discriminator will be a list of single
        # str lists, one for the attribute and one for the actual alias. The logic here will work even if there is
        # more than one possible attribute, and looks for whether a single alias choice is present as a documented
        # property on all choices. If so, that property will be used as the OpenAPI discriminator.
        for alias_path in schema['discriminator']:
            if not isinstance(alias_path, list):
                break  # this means that the discriminator is not a list of alias paths
            if len(alias_path) != 1:
                continue  # this means that the "alias" does not represent a single field
            alias = alias_path[0]
            if not isinstance(alias, str):
                continue  # this means that the "alias" does not represent a field
            alias_is_present_on_all_choices = True
            for choice in one_of_choices:
                # Follow $ref chains so we inspect the actual member schema's properties.
                while '$ref' in choice:
                    assert isinstance(choice['$ref'], str)
                    choice = self.get_schema_from_definitions(JsonRef(choice['$ref'])) or {}
                properties = choice.get('properties', {})
                if not isinstance(properties, dict) or alias not in properties:
                    alias_is_present_on_all_choices = False
                    break
            if alias_is_present_on_all_choices:
                openapi_discriminator = alias
                break
    return openapi_discriminator
def chain_schema(self, schema: core_schema.ChainSchema) -> JsonSchemaValue:
    """Build the JSON schema for a core_schema.ChainSchema.

    Validation uses the first step in the chain; serialization uses the last.

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    step = schema['steps'][0 if self.mode == 'validation' else -1]
    return self.generate_inner(step)
def lax_or_strict_schema(self, schema: core_schema.LaxOrStrictSchema) -> JsonSchemaValue:
    """Build the JSON schema for a value matching either the lax or the strict schema.

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    # TODO: Need to read the default value off of model config or whatever
    use_strict = schema.get('strict', False)  # TODO: replace this default False
    # If your JSON schema fails to generate it is probably
    # because one of the following two branches failed.
    selected = 'strict_schema' if use_strict else 'lax_schema'
    return self.generate_inner(schema[selected])
def json_or_python_schema(self, schema: core_schema.JsonOrPythonSchema) -> JsonSchemaValue:
    """Build the JSON schema for a value matching either the JSON or the Python schema.

    The JSON schema is used instead of the Python schema. If you want to use the Python
    schema, you should override this method.

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    json_branch = schema['json_schema']
    return self.generate_inner(json_branch)
def typed_dict_schema(self, schema: core_schema.TypedDictSchema) -> JsonSchemaValue:
    """Generates a JSON schema that matches a schema that defines a typed dict.

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    # 'total' controls whether fields default to required (TypedDict semantics).
    total = schema.get('total', True)
    named_required_fields: list[tuple[str, bool, CoreSchemaField]] = [
        (name, self.field_is_required(field, total), field)
        for name, field in schema['fields'].items()
        if self.field_is_present(field)
    ]
    if self.mode == 'serialization':
        # Computed fields only exist on output, so they are appended in serialization mode.
        named_required_fields.extend(self._name_required_computed_fields(schema.get('computed_fields', [])))

    # Push this typed dict's config so nested generation sees the right settings.
    config = _get_typed_dict_config(schema)
    with self._config_wrapper_stack.push(config):
        json_schema = self._named_required_fields_schema(named_required_fields)

    # Map the 'extra' config onto JSON Schema's additionalProperties; 'ignore' adds nothing.
    extra = config.get('extra', 'ignore')
    if extra == 'forbid':
        json_schema['additionalProperties'] = False
    elif extra == 'allow':
        json_schema['additionalProperties'] = True

    return json_schema
@staticmethod
def _name_required_computed_fields(
computed_fields: list[ComputedField],
) -> list[tuple[str, bool, core_schema.ComputedField]]:
return [(field['property_name'], True, field) for field in computed_fields]
def _named_required_fields_schema(
    self, named_required_fields: Sequence[tuple[str, bool, CoreSchemaField]]
) -> JsonSchemaValue:
    """Build a ``{'type': 'object'}`` schema from (name, required, field) triples.

    Shared by the typed-dict, model, and dataclass schema generators.
    """
    properties: dict[str, JsonSchemaValue] = {}
    required_fields: list[str] = []
    for name, required, field in named_required_fields:
        if self.by_alias:
            # Property names use the field alias when alias-based generation is enabled.
            name = self._get_alias_name(field, name)
        try:
            field_json_schema = self.generate_inner(field).copy()
        except PydanticOmit:
            # The field opted out of the JSON schema entirely.
            continue
        if 'title' not in field_json_schema and self.field_title_should_be_set(field):
            title = self.get_title_from_name(name)
            field_json_schema['title'] = title
        # Move any keys sitting next to a '$ref' into a valid position.
        field_json_schema = self.handle_ref_overrides(field_json_schema)
        properties[name] = field_json_schema
        if required:
            required_fields.append(name)

    json_schema = {'type': 'object', 'properties': properties}
    if required_fields:
        json_schema['required'] = required_fields
    return json_schema
def _get_alias_name(self, field: CoreSchemaField, name: str) -> str:
if field['type'] == 'computed-field':
alias: Any = field.get('alias', name)
elif self.mode == 'validation':
alias = field.get('validation_alias', name)
else:
alias = field.get('serialization_alias', name)
if isinstance(alias, str):
name = alias
elif isinstance(alias, list):
alias = cast('list[str] | str', alias)
for path in alias:
if isinstance(path, list) and len(path) == 1 and isinstance(path[0], str):
# Use the first valid single-item string path; the code that constructs the alias array
# should ensure the first such item is what belongs in the JSON schema
name = path[0]
break
else:
assert_never(alias)
return name
def typed_dict_field_schema(self, schema: core_schema.TypedDictField) -> JsonSchemaValue:
    """Build the JSON schema for a typed-dict field by generating its inner schema.

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    inner = schema['schema']
    return self.generate_inner(inner)
def dataclass_field_schema(self, schema: core_schema.DataclassField) -> JsonSchemaValue:
    """Build the JSON schema for a dataclass field by generating its inner schema.

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    inner = schema['schema']
    return self.generate_inner(inner)
def model_field_schema(self, schema: core_schema.ModelField) -> JsonSchemaValue:
    """Build the JSON schema for a model field by generating its inner schema.

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    inner = schema['schema']
    return self.generate_inner(inner)
def computed_field_schema(self, schema: core_schema.ComputedField) -> JsonSchemaValue:
    """Build the JSON schema for a computed field from its return schema.

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    return_schema = schema['return_schema']
    return self.generate_inner(return_schema)
def model_schema(self, schema: core_schema.ModelSchema) -> JsonSchemaValue:
    """Generates a JSON schema that matches a schema that defines a model.

    Args:
        schema: The core schema.

    Returns:
        The generated JSON schema.
    """
    # We do not use schema['model'].model_json_schema() here
    # because it could lead to inconsistent refs handling, etc.
    cls = cast('type[BaseModel]', schema['cls'])
    config = cls.model_config
    title = config.get('title')

    # Push the model's config so nested field generation sees the right settings.
    with self._config_wrapper_stack.push(config):
        json_schema = self.generate_inner(schema['schema'])

    json_schema_extra = config.get('json_schema_extra')
    if cls.__pydantic_root_model__:
        # For RootModel, the extra may come from the 'root' field instead of model_config,
        # but not from both at once.
        root_json_schema_extra = cls.model_fields['root'].json_schema_extra
        if json_schema_extra and root_json_schema_extra:
            raise ValueError(
                '"model_config[\'json_schema_extra\']" and "Field.json_schema_extra" on "RootModel.root"'
                ' field must not be set simultaneously'
            )
        if root_json_schema_extra:
            json_schema_extra = root_json_schema_extra

    json_schema = self._update_class_schema(json_schema, title, config.get('extra', None), cls, json_schema_extra)

    return json_schema
def _update_class_schema(
self,
json_schema: JsonSchemaValue,
title: str | None,
extra: Literal['allow', 'ignore', 'forbid'] | None,
cls: type[Any], | json_schema_extra: dict[str, Any] | JsonSchemaExtraCallable | None, | 9 | 2023-10-23 18:09:28+00:00 | 12k |
f0uriest/quadax | tests/test_adaptive.py | [
{
"identifier": "romberg",
"path": "quadax/romberg.py",
"snippet": "def romberg(\n fun,\n interval,\n args=(),\n full_output=False,\n epsabs=1.4e-8,\n epsrel=1.4e-8,\n divmax=20,\n norm=jnp.inf,\n):\n \"\"\"Romberg integration of a callable function or method.\n\n Returns t... | import jax.numpy as jnp
import numpy as np
import pytest
import scipy
from jax.config import config as jax_config
from quadax import quadcc, quadgk, quadts, romberg, rombergts | 8,417 | y, info = quadts(
prob["fun"],
prob["interval"],
epsabs=tol,
epsrel=tol,
**kwargs,
)
assert info.status == status
if status == 0:
assert info.err < max(tol, tol * np.max(np.abs(y)))
np.testing.assert_allclose(
y,
prob["val"],
rtol=fudge * tol,
atol=fudge * tol,
err_msg=f"problem {i}, tol={tol}",
)
def test_prob0(self):
"""Test for example problem #0."""
self._base(0, 1e-4)
self._base(0, 1e-8)
self._base(0, 1e-12)
def test_prob1(self):
"""Test for example problem #1."""
self._base(1, 1e-4)
self._base(1, 1e-8)
self._base(1, 1e-12)
def test_prob2(self):
"""Test for example problem #2."""
self._base(2, 1e-4, order=41)
self._base(2, 1e-8, order=41)
self._base(2, 1e-12, order=41)
def test_prob3(self):
"""Test for example problem #3."""
self._base(3, 1e-4, order=61)
self._base(3, 1e-8, order=61)
self._base(3, 1e-12, order=61)
def test_prob4(self):
"""Test for example problem #4."""
self._base(4, 1e-4, order=81)
self._base(4, 1e-8, order=81)
self._base(4, 1e-12, order=81)
def test_prob5(self):
"""Test for example problem #5."""
self._base(5, 1e-4, order=101)
self._base(5, 1e-8, order=101)
self._base(5, 1e-12, order=101)
def test_prob6(self):
"""Test for example problem #6."""
self._base(6, 1e-4)
self._base(6, 1e-8)
self._base(6, 1e-12, 1e4)
def test_prob7(self):
"""Test for example problem #7."""
self._base(7, 1e-4)
self._base(7, 1e-8)
self._base(7, 1e-12)
def test_prob8(self):
"""Test for example problem #8."""
self._base(8, 1e-4)
self._base(8, 1e-8)
self._base(8, 1e-12)
def test_prob9(self):
"""Test for example problem #9."""
self._base(9, 1e-4)
self._base(9, 1e-8, 10)
self._base(9, 1e-12, 1e4)
def test_prob10(self):
"""Test for example problem #10."""
self._base(10, 1e-4)
self._base(10, 1e-8)
self._base(10, 1e-12)
def test_prob11(self):
"""Test for example problem #11."""
self._base(11, 1e-4)
self._base(11, 1e-8)
self._base(11, 1e-12, 1e4)
def test_prob12(self):
"""Test for example problem #12."""
self._base(12, 1e-4)
self._base(12, 1e-8)
self._base(12, 1e-12)
def test_prob13(self):
"""Test for example problem #13."""
self._base(13, 1e-4)
self._base(13, 1e-8)
self._base(13, 1e-12)
def test_prob14(self):
"""Test for example problem #14."""
self._base(14, 1e-4)
self._base(14, 1e-8)
self._base(14, 1e-12)
def test_prob15(self):
"""Test for example problem #15."""
self._base(14, 1e-4)
self._base(14, 1e-8)
self._base(14, 1e-12)
class TestRombergTS:
"""Tests for tanh-sinh quadrature with adaptive refinement."""
def _base(self, i, tol, fudge=1, **kwargs):
prob = example_problems[i]
| """Tests for adaptive quadrature routines."""
jax_config.update("jax_enable_x64", True)
example_problems = [
# problem 0
{"fun": lambda t: t * jnp.log(1 + t), "interval": [0, 1], "val": 1 / 4},
# problem 1
{
"fun": lambda t: t**2 * jnp.arctan(t),
"interval": [0, 1],
"val": (jnp.pi - 2 + 2 * jnp.log(2)) / 12,
},
# problem 2
{
"fun": lambda t: jnp.exp(t) * jnp.cos(t),
"interval": [0, jnp.pi / 2],
"val": (jnp.exp(jnp.pi / 2) - 1) / 2,
},
# problem 3
{
"fun": lambda t: jnp.arctan(jnp.sqrt(2 + t**2))
/ ((1 + t**2) * jnp.sqrt(2 + t**2)),
"interval": [0, 1],
"val": 5 * jnp.pi**2 / 96,
},
# problem 4
{"fun": lambda t: jnp.sqrt(t) * jnp.log(t), "interval": [0, 1], "val": -4 / 9},
# problem 5
{"fun": lambda t: jnp.sqrt(1 - t**2), "interval": [0, 1], "val": jnp.pi / 4},
# problem 6
{
"fun": lambda t: jnp.sqrt(t) / jnp.sqrt(1 - t**2),
"interval": [0, 1],
"val": 2
* jnp.sqrt(jnp.pi)
* scipy.special.gamma(3 / 4)
/ scipy.special.gamma(1 / 4),
},
# problem 7
{"fun": lambda t: jnp.log(t) ** 2, "interval": [0, 1], "val": 2},
# problem 8
{
"fun": lambda t: jnp.log(jnp.cos(t)),
"interval": [0, jnp.pi / 2],
"val": -jnp.pi * jnp.log(2) / 2,
},
# problem 9
{
"fun": lambda t: jnp.sqrt(jnp.tan(t)),
"interval": [0, jnp.pi / 2],
"val": jnp.pi * jnp.sqrt(2) / 2,
},
# problem 10
{"fun": lambda t: 1 / (1 + t**2), "interval": [0, jnp.inf], "val": jnp.pi / 2},
# problem 11
{
"fun": lambda t: jnp.exp(-t) / jnp.sqrt(t),
"interval": [0, jnp.inf],
"val": jnp.sqrt(jnp.pi),
},
# problem 12
{
"fun": lambda t: jnp.exp(-(t**2) / 2),
"interval": [-jnp.inf, jnp.inf],
"val": jnp.sqrt(2 * jnp.pi),
},
# problem 13
{"fun": lambda t: jnp.exp(-t) * jnp.cos(t), "interval": [0, jnp.inf], "val": 1 / 2},
# problem 14 - vector valued integrand made of up problems 0 and 1
{
"fun": lambda t: jnp.array([t * jnp.log(1 + t), t**2 * jnp.arctan(t)]),
"interval": [0, 1],
"val": jnp.array([1 / 4, (jnp.pi - 2 + 2 * jnp.log(2)) / 12]),
},
    # problem 15 - integral with breakpoints
{
"fun": lambda t: jnp.log((t - 1) ** 2),
"interval": [0, 1, 2],
"val": -4,
},
]
class TestQuadGK:
    """Tests for Gauss-Kronrod quadrature."""

    def _base(self, i, tol, fudge=1, **kwargs):
        """Integrate example problem ``i`` at tolerance ``tol`` and verify it.

        Parameters
        ----------
        i : int
            Index into ``example_problems``.
        tol : float
            Absolute and relative tolerance requested from the quadrature.
        fudge : float
            Factor loosening the accuracy check, for problems (e.g. with
            integrable singularities) where ``tol`` cannot quite be met.
        kwargs
            Extra options forwarded to ``quadgk``.  ``status`` (default 0)
            is popped off first and is the expected convergence status code.
        """
        prob = example_problems[i]
        status = kwargs.pop("status", 0)
        y, info = quadgk(
            prob["fun"],
            prob["interval"],
            epsabs=tol,
            epsrel=tol,
            **kwargs,
        )
        assert info.status == status
        if status == 0:
            # on success, the reported error estimate must honor the tolerance
            assert info.err < max(tol, tol * np.max(np.abs(y)))
        np.testing.assert_allclose(
            y,
            prob["val"],
            rtol=fudge * tol,
            atol=fudge * tol,
            err_msg=f"problem {i}, tol={tol}",
        )

    def test_prob0(self):
        """Test for example problem #0."""
        self._base(0, 1e-4, order=21)
        self._base(0, 1e-8, order=21)
        self._base(0, 1e-12, order=21)

    def test_prob1(self):
        """Test for example problem #1."""
        self._base(1, 1e-4, order=31)
        self._base(1, 1e-8, order=31)
        self._base(1, 1e-12, order=31)

    def test_prob2(self):
        """Test for example problem #2."""
        self._base(2, 1e-4, order=41)
        self._base(2, 1e-8, order=41)
        self._base(2, 1e-12, order=41)

    def test_prob3(self):
        """Test for example problem #3."""
        self._base(3, 1e-4, order=51)
        self._base(3, 1e-8, order=51)
        self._base(3, 1e-12, order=51)

    def test_prob4(self):
        """Test for example problem #4."""
        self._base(4, 1e-4, order=61)
        self._base(4, 1e-8, order=61)
        self._base(4, 1e-12, order=61)

    def test_prob5(self):
        """Test for example problem #5."""
        self._base(5, 1e-4, order=21)
        self._base(5, 1e-8, order=21)
        self._base(5, 1e-12, order=21)

    def test_prob6(self):
        """Test for example problem #6."""
        self._base(6, 1e-4, order=15)
        self._base(6, 1e-8, 100, order=15)
        self._base(6, 1e-12, 1e5, order=15, max_ninter=100, status=8)

    def test_prob7(self):
        """Test for example problem #7."""
        self._base(7, 1e-4, order=61)
        self._base(7, 1e-8, order=61)
        self._base(7, 1e-12, order=61, status=4)

    def test_prob8(self):
        """Test for example problem #8."""
        self._base(8, 1e-4, order=51)
        self._base(8, 1e-8, order=51)
        self._base(8, 1e-12, order=51, status=4)

    def test_prob9(self):
        """Test for example problem #9."""
        self._base(9, 1e-4, order=15)
        self._base(9, 1e-8, 100, order=15)
        self._base(9, 1e-12, 1e4, order=15, max_ninter=100, status=8)

    def test_prob10(self):
        """Test for example problem #10."""
        self._base(10, 1e-4, order=15)
        self._base(10, 1e-8, order=15)
        self._base(10, 1e-12, order=15)

    def test_prob11(self):
        """Test for example problem #11."""
        self._base(11, 1e-4, order=21)
        self._base(11, 1e-8, 100, order=21)
        self._base(11, 1e-12, 1e4, order=21, status=8, max_ninter=100)

    def test_prob12(self):
        """Test for example problem #12."""
        self._base(12, 1e-4, order=15)
        self._base(12, 1e-8, order=15)
        self._base(12, 1e-12, order=15)

    def test_prob13(self):
        """Test for example problem #13."""
        self._base(13, 1e-4, order=31)
        self._base(13, 1e-8, order=31)
        self._base(13, 1e-12, order=31)

    def test_prob14(self):
        """Test for example problem #14."""
        self._base(14, 1e-4)
        self._base(14, 1e-8)
        self._base(14, 1e-12)

    def test_prob15(self):
        """Test for example problem #15 (integral with interior breakpoint)."""
        # Fix copy-paste slip: this previously re-ran problem 14.
        self._base(15, 1e-4)
        self._base(15, 1e-8)
        self._base(15, 1e-12)
class TestQuadCC:
    """Tests for Clenshaw-Curtis quadrature."""

    def _base(self, i, tol, fudge=1, **kwargs):
        """Integrate example problem ``i`` at tolerance ``tol`` and verify it.

        ``fudge`` loosens the accuracy check for hard (e.g. singular)
        problems; ``status`` in ``kwargs`` (default 0) is the expected
        convergence status code.  Remaining kwargs go to ``quadcc``.
        """
        prob = example_problems[i]
        status = kwargs.pop("status", 0)
        y, info = quadcc(
            prob["fun"],
            prob["interval"],
            epsabs=tol,
            epsrel=tol,
            **kwargs,
        )
        assert info.status == status
        if status == 0:
            # on success, the reported error estimate must honor the tolerance
            assert info.err < max(tol, tol * np.max(np.abs(y)))
        np.testing.assert_allclose(
            y,
            prob["val"],
            rtol=fudge * tol,
            atol=fudge * tol,
            err_msg=f"problem {i}, tol={tol}",
        )

    def test_prob0(self):
        """Test for example problem #0."""
        self._base(0, 1e-4, order=32)
        self._base(0, 1e-8, order=32)
        self._base(0, 1e-12, order=32)

    def test_prob1(self):
        """Test for example problem #1."""
        self._base(1, 1e-4, order=64)
        self._base(1, 1e-8, order=64)
        self._base(1, 1e-12, order=64)

    def test_prob2(self):
        """Test for example problem #2."""
        self._base(2, 1e-4, order=128)
        self._base(2, 1e-8, order=128)
        self._base(2, 1e-12, order=128)

    def test_prob3(self):
        """Test for example problem #3."""
        self._base(3, 1e-4, order=256)
        self._base(3, 1e-8, order=256)
        self._base(3, 1e-12, order=256)

    def test_prob4(self):
        """Test for example problem #4."""
        self._base(4, 1e-4, order=8)
        self._base(4, 1e-8, order=8)
        self._base(4, 1e-12, order=8, max_ninter=100)

    def test_prob5(self):
        """Test for example problem #5."""
        self._base(5, 1e-4, order=16)
        self._base(5, 1e-8, order=16)
        self._base(5, 1e-12, order=16)

    def test_prob6(self):
        """Test for example problem #6."""
        self._base(6, 1e-4)
        self._base(6, 1e-8, 100)
        self._base(6, 1e-12, 1e5, max_ninter=100, status=8)

    def test_prob7(self):
        """Test for example problem #7."""
        self._base(7, 1e-4)
        self._base(7, 1e-8, 10)
        self._base(7, 1e-12, status=8)

    def test_prob8(self):
        """Test for example problem #8."""
        self._base(8, 1e-4)
        self._base(8, 1e-8)
        self._base(8, 1e-12, status=8)

    def test_prob9(self):
        """Test for example problem #9."""
        self._base(9, 1e-4)
        self._base(9, 1e-8, max_ninter=100, status=8)
        self._base(9, 1e-12, 1e4, max_ninter=100, status=8)

    def test_prob10(self):
        """Test for example problem #10."""
        self._base(10, 1e-4)
        self._base(10, 1e-8)
        self._base(10, 1e-12, 10)

    def test_prob11(self):
        """Test for example problem #11."""
        self._base(11, 1e-4)
        self._base(11, 1e-8, 100)
        self._base(11, 1e-12, 1e4, status=8)

    def test_prob12(self):
        """Test for example problem #12."""
        self._base(12, 1e-4)
        self._base(12, 1e-8)
        self._base(12, 1e-12)

    def test_prob13(self):
        """Test for example problem #13."""
        self._base(13, 1e-4)
        self._base(13, 1e-8)
        self._base(13, 1e-12)

    def test_prob14(self):
        """Test for example problem #14."""
        self._base(14, 1e-4)
        self._base(14, 1e-8)
        self._base(14, 1e-12)

    def test_prob15(self):
        """Test for example problem #15 (integral with interior breakpoint)."""
        # Fix copy-paste slip: this previously re-ran problem 14.
        self._base(15, 1e-4)
        self._base(15, 1e-8)
        self._base(15, 1e-12)
class TestQuadTS:
    """Tests for adaptive tanh-sinh quadrature."""

    def _base(self, i, tol, fudge=1, **kwargs):
        """Integrate example problem ``i`` at tolerance ``tol`` and verify it.

        ``fudge`` loosens the accuracy check for hard (e.g. singular)
        problems; ``status`` in ``kwargs`` (default 0) is the expected
        convergence status code.  Remaining kwargs go to ``quadts``.
        """
        prob = example_problems[i]
        status = kwargs.pop("status", 0)
        y, info = quadts(
            prob["fun"],
            prob["interval"],
            epsabs=tol,
            epsrel=tol,
            **kwargs,
        )
        assert info.status == status
        if status == 0:
            # on success, the reported error estimate must honor the tolerance
            assert info.err < max(tol, tol * np.max(np.abs(y)))
        np.testing.assert_allclose(
            y,
            prob["val"],
            rtol=fudge * tol,
            atol=fudge * tol,
            err_msg=f"problem {i}, tol={tol}",
        )

    def test_prob0(self):
        """Test for example problem #0."""
        self._base(0, 1e-4)
        self._base(0, 1e-8)
        self._base(0, 1e-12)

    def test_prob1(self):
        """Test for example problem #1."""
        self._base(1, 1e-4)
        self._base(1, 1e-8)
        self._base(1, 1e-12)

    def test_prob2(self):
        """Test for example problem #2."""
        self._base(2, 1e-4, order=41)
        self._base(2, 1e-8, order=41)
        self._base(2, 1e-12, order=41)

    def test_prob3(self):
        """Test for example problem #3."""
        self._base(3, 1e-4, order=61)
        self._base(3, 1e-8, order=61)
        self._base(3, 1e-12, order=61)

    def test_prob4(self):
        """Test for example problem #4."""
        self._base(4, 1e-4, order=81)
        self._base(4, 1e-8, order=81)
        self._base(4, 1e-12, order=81)

    def test_prob5(self):
        """Test for example problem #5."""
        self._base(5, 1e-4, order=101)
        self._base(5, 1e-8, order=101)
        self._base(5, 1e-12, order=101)

    def test_prob6(self):
        """Test for example problem #6."""
        self._base(6, 1e-4)
        self._base(6, 1e-8)
        self._base(6, 1e-12, 1e4)

    def test_prob7(self):
        """Test for example problem #7."""
        self._base(7, 1e-4)
        self._base(7, 1e-8)
        self._base(7, 1e-12)

    def test_prob8(self):
        """Test for example problem #8."""
        self._base(8, 1e-4)
        self._base(8, 1e-8)
        self._base(8, 1e-12)

    def test_prob9(self):
        """Test for example problem #9."""
        self._base(9, 1e-4)
        self._base(9, 1e-8, 10)
        self._base(9, 1e-12, 1e4)

    def test_prob10(self):
        """Test for example problem #10."""
        self._base(10, 1e-4)
        self._base(10, 1e-8)
        self._base(10, 1e-12)

    def test_prob11(self):
        """Test for example problem #11."""
        self._base(11, 1e-4)
        self._base(11, 1e-8)
        self._base(11, 1e-12, 1e4)

    def test_prob12(self):
        """Test for example problem #12."""
        self._base(12, 1e-4)
        self._base(12, 1e-8)
        self._base(12, 1e-12)

    def test_prob13(self):
        """Test for example problem #13."""
        self._base(13, 1e-4)
        self._base(13, 1e-8)
        self._base(13, 1e-12)

    def test_prob14(self):
        """Test for example problem #14."""
        self._base(14, 1e-4)
        self._base(14, 1e-8)
        self._base(14, 1e-12)

    def test_prob15(self):
        """Test for example problem #15 (integral with interior breakpoint)."""
        # Fix copy-paste slip: this previously re-ran problem 14.
        self._base(15, 1e-4)
        self._base(15, 1e-8)
        self._base(15, 1e-12)
class TestRombergTS:
"""Tests for tanh-sinh quadrature with adaptive refinement."""
def _base(self, i, tol, fudge=1, **kwargs):
prob = example_problems[i] | y, info = rombergts( | 1 | 2023-10-24 04:44:34+00:00 | 12k |
zju3dv/nr_in_a_room | models_neurecon/neus_multi_rendering.py | [
{
"identifier": "NeuS",
"path": "models_neurecon/neus.py",
"snippet": "class NeuS(nn.Module):\n def __init__(\n self,\n variance_init=0.05,\n speed_factor=1.0,\n input_ch=3,\n input_obj_ch=0,\n input_light_ch=0,\n input_appearance_ch=0,\n W_geo_... | import ipdb
import torch
import sys
import os
import copy
from typing import List, Dict, Any
from einops import rearrange, reduce, repeat
from models_neurecon.neus import NeuS, volume_render
from models_neurecon.base import ImplicitSurface | 7,451 |
sys.path.append(os.getcwd()) # noqa
def volume_rendering_multi_neus(
    results,
    typ,
    z_vals_list,
    rgbs_list,
    alphas_list,
    noise_std,
    white_back,
    obj_ids_list=None,
):
    """Alpha-composite per-object NeuS samples along shared rays.

    Samples from all objects are concatenated and jointly sorted by depth so
    transmittance accumulates in front-to-back order.  Composited maps are
    written into ``results`` under keys suffixed with ``typ``:
    ``rgb_{typ}``, ``depth_{typ}``, ``opacity_{typ}`` and, when
    ``obj_ids_list`` is given, the depth-sorted ``obj_ids_{typ}``.

    NOTE(review): ``noise_std`` is currently unused; kept only for interface
    compatibility with callers.
    """
    # order via z_vals: merge all objects' samples along the sample dimension
    z_vals = torch.cat(z_vals_list, 1)  # (N_rays, N_samples*N_objs)
    rgbs = torch.cat(rgbs_list, 1)  # (N_rays, N_samples*N_objs, 3)
    alphas = torch.cat(alphas_list, 1)  # (N_rays, N_samples*N_objs)
    z_vals, idx_sorted = torch.sort(z_vals, -1)
    for i in range(3):
        rgbs[:, :, i] = torch.gather(rgbs[:, :, i].clone(), dim=1, index=idx_sorted)
    alphas = torch.gather(alphas, dim=1, index=idx_sorted)
    # record object ids for recovering weights of each object after sorting
    if obj_ids_list is not None:  # fixed: identity check instead of `!= None`
        obj_ids = torch.cat(obj_ids_list, -1)
        results[f"obj_ids_{typ}"] = torch.gather(obj_ids, dim=1, index=idx_sorted)
    alphas_shifted = torch.cat(
        [torch.ones_like(alphas[:, :1]), 1 - alphas + 1e-10], -1
    )  # [1, 1-a1, 1-a2, ...]
    # standard alpha compositing: w_i = a_i * prod_{j<i}(1 - a_j)
    weights = alphas * torch.cumprod(alphas_shifted[:, :-1], -1)  # (N_rays, N_samples_)
    weights_sum = reduce(
        weights, "n1 n2 -> n1", "sum"
    )  # (N_rays), the accumulated opacity along the rays
    # equals "1 - (1-a1)(1-a2)...(1-an)" mathematically
    results[f"opacity_{typ}"] = weights_sum
    rgb_map = reduce(
        rearrange(weights, "n1 n2 -> n1 n2 1") * rgbs, "n1 n2 c -> n1 c", "sum"
    )
    depth_map = reduce(weights * z_vals, "n1 n2 -> n1", "sum")
    if white_back:
        # composite the remaining (unoccupied) ray mass onto a white background
        rgb_map = rgb_map + 1 - weights_sum.unsqueeze(-1)
    results[f"rgb_{typ}"] = rgb_map
    results[f"depth_{typ}"] = depth_map
# adopt from neurecon/ray_casting.py
def sphere_tracing_surface_points(
|
sys.path.append(os.getcwd()) # noqa
def volume_rendering_multi_neus(
    results,
    typ,
    z_vals_list,
    rgbs_list,
    alphas_list,
    noise_std,
    white_back,
    obj_ids_list=None,
):
    """Alpha-composite per-object NeuS samples along shared rays.

    Samples from all objects are concatenated and jointly sorted by depth so
    transmittance accumulates in front-to-back order.  Composited maps are
    written into ``results`` under keys suffixed with ``typ``:
    ``rgb_{typ}``, ``depth_{typ}``, ``opacity_{typ}`` and, when
    ``obj_ids_list`` is given, the depth-sorted ``obj_ids_{typ}``.

    NOTE(review): ``noise_std`` is currently unused; kept only for interface
    compatibility with callers.
    """
    # order via z_vals: merge all objects' samples along the sample dimension
    z_vals = torch.cat(z_vals_list, 1)  # (N_rays, N_samples*N_objs)
    rgbs = torch.cat(rgbs_list, 1)  # (N_rays, N_samples*N_objs, 3)
    alphas = torch.cat(alphas_list, 1)  # (N_rays, N_samples*N_objs)
    z_vals, idx_sorted = torch.sort(z_vals, -1)
    for i in range(3):
        rgbs[:, :, i] = torch.gather(rgbs[:, :, i].clone(), dim=1, index=idx_sorted)
    alphas = torch.gather(alphas, dim=1, index=idx_sorted)
    # record object ids for recovering weights of each object after sorting
    if obj_ids_list is not None:  # fixed: identity check instead of `!= None`
        obj_ids = torch.cat(obj_ids_list, -1)
        results[f"obj_ids_{typ}"] = torch.gather(obj_ids, dim=1, index=idx_sorted)
    alphas_shifted = torch.cat(
        [torch.ones_like(alphas[:, :1]), 1 - alphas + 1e-10], -1
    )  # [1, 1-a1, 1-a2, ...]
    # standard alpha compositing: w_i = a_i * prod_{j<i}(1 - a_j)
    weights = alphas * torch.cumprod(alphas_shifted[:, :-1], -1)  # (N_rays, N_samples_)
    weights_sum = reduce(
        weights, "n1 n2 -> n1", "sum"
    )  # (N_rays), the accumulated opacity along the rays
    # equals "1 - (1-a1)(1-a2)...(1-an)" mathematically
    results[f"opacity_{typ}"] = weights_sum
    rgb_map = reduce(
        rearrange(weights, "n1 n2 -> n1 n2 1") * rgbs, "n1 n2 c -> n1 c", "sum"
    )
    depth_map = reduce(weights * z_vals, "n1 n2 -> n1", "sum")
    if white_back:
        # composite the remaining (unoccupied) ray mass onto a white background
        rgb_map = rgb_map + 1 - weights_sum.unsqueeze(-1)
    results[f"rgb_{typ}"] = rgb_map
    results[f"depth_{typ}"] = depth_map
# adopt from neurecon/ray_casting.py
def sphere_tracing_surface_points( | implicit_surface: ImplicitSurface, | 2 | 2023-10-15 08:41:29+00:00 | 12k |
chenxn2020/GOSE | GOSEfinetune/models/layoutlmv2/modeling_layoutlmv2.py | [
{
"identifier": "ReOutput",
"path": "GOSEfinetune/utils.py",
"snippet": "class ReOutput(ModelOutput):\n loss: Optional[torch.FloatTensor] = None\n logits: torch.FloatTensor = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = No... | import math
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
import detectron2
from torch import nn
from torch.nn import CrossEntropyLoss
from detectron2.modeling import META_ARCH_REGISTRY
from transformers import PreTrainedModel
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
TokenClassifierOutput,
)
from transformers.modeling_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMIntermediate as LayoutLMv2Intermediate
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMOutput as LayoutLMv2Output
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMPooler as LayoutLMv2Pooler
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMSelfOutput as LayoutLMv2SelfOutput
from transformers.utils import logging
from ...utils import ReOutput
from .configuration_layoutlmv2 import LayoutLMv2Config
from .detectron2_config import add_layoutlmv2_config
from ...modules.decoders.gose import GOSE | 9,920 | last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
class LayoutLMv2ForTokenClassification(LayoutLMv2PreTrainedModel):
    """LayoutLMv2 backbone with a linear token-classification head applied to
    the text part of the hidden states (e.g. for sequence-labeling tasks)."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.layoutlmv2 = LayoutLMv2Model(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()

    def get_input_embeddings(self):
        """Return the backbone's word-embedding table."""
        return self.layoutlmv2.embeddings.word_embeddings

    def forward(
        self,
        input_ids=None,
        bbox=None,
        image=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """Run the backbone and classify every text token.

        Returns a ``TokenClassifierOutput`` (or a plain tuple when
        ``return_dict`` is falsy); ``loss`` is cross-entropy and is only
        computed when ``labels`` is given.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.layoutlmv2(
            input_ids=input_ids,
            bbox=bbox,
            image=image,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # the backbone output concatenates text tokens and image tokens;
        # only the leading text part is classified
        seq_length = input_ids.size(1)
        sequence_output, image_output = outputs[0][:, :seq_length], outputs[0][:, seq_length:]
        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            if attention_mask is not None:
                # restrict the loss to non-padded positions
                active_loss = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)[active_loss]
                active_labels = labels.view(-1)[active_loss]
                loss = loss_fct(active_logits, active_labels)
            else:
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
class LayoutLMv2ForRelationExtraction(LayoutLMv2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.layoutlmv2 = LayoutLMv2Model(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.decoder = config.decoder_name
self.decoder = config.decoder_name
if self.decoder == 're':
self.extractor = REDecoder(config, config.hidden_size)
elif self.decoder == 'gose':
self.extractor = GOSE(config)
self.init_weights()
def forward(
self,
input_ids,
bbox,
labels=None,
image=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
entities=None,
relations=None,
):
outputs = self.layoutlmv2(
input_ids=input_ids,
bbox=bbox,
image=image,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
)
seq_length = input_ids.size(1)
sequence_output, image_output = outputs[0][:, :seq_length], outputs[0][:, seq_length:]
sequence_output = self.dropout(sequence_output)
loss, pred_relations = self.extractor(sequence_output, entities, relations, bbox)
# coding=utf-8
# Module-level logger, keyed by this module's import path.
logger = logging.get_logger(__name__)

# Names of the released pretrained checkpoints for this architecture.
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "layoutlmv2-base-uncased",
    "layoutlmv2-large-uncased",
]

# LayoutLMv2 uses the stock PyTorch LayerNorm; aliased for readability.
LayoutLMv2LayerNorm = torch.nn.LayerNorm
class LayoutLMv2Embeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super(LayoutLMv2Embeddings, self).__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        # separate lookup tables for x/y corner coordinates and for box width/height
        self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size)
        self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size)
        self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size)
        self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        self.LayerNorm = LayoutLMv2LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        # kept as a buffer so it moves with the module across devices
        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))

    def _cal_spatial_position_embeddings(self, bbox):
        """Embed token bounding boxes.

        ``bbox`` is indexed as (batch, seq, 4) with (x0, y0, x1, y1) corners;
        out-of-range coordinates raise IndexError (valid range 0-1000 per the
        message below).  Returns the concatenation of the four corner
        embeddings plus height/width embeddings along the last dimension.
        """
        try:
            left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])
            upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1])
            right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2])
            lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3])
        except IndexError as e:
            raise IndexError("The :obj:`bbox`coordinate values should be within 0-1000 range.") from e
        # height/width embeddings from box extents
        h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1])
        w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0])
        spatial_position_embeddings = torch.cat(
            [
                left_position_embeddings,
                upper_position_embeddings,
                right_position_embeddings,
                lower_position_embeddings,
                h_position_embeddings,
                w_position_embeddings,
            ],
            dim=-1,
        )
        return spatial_position_embeddings
class LayoutLMv2SelfAttention(nn.Module):
    """Multi-head self-attention with optional 1D (``rel_pos``) and 2D spatial
    (``rel_2d_pos``) additive relative-attention biases, and an optional fused
    QKV projection (``config.fast_qkv``)."""

    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )
        self.fast_qkv = config.fast_qkv
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.has_relative_attention_bias = config.has_relative_attention_bias
        self.has_spatial_attention_bias = config.has_spatial_attention_bias
        if config.fast_qkv:
            # single fused projection; Q and V get learned biases, K stays bias-free
            self.qkv_linear = nn.Linear(config.hidden_size, 3 * self.all_head_size, bias=False)
            self.q_bias = nn.Parameter(torch.zeros(1, 1, self.all_head_size))
            self.v_bias = nn.Parameter(torch.zeros(1, 1, self.all_head_size))
        else:
            self.query = nn.Linear(config.hidden_size, self.all_head_size)
            self.key = nn.Linear(config.hidden_size, self.all_head_size)
            self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        # (B, L, H*D) -> (B, H, L, D)
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def compute_qkv(self, hidden_states):
        """Project hidden states into query/key/value, via either the fused
        or the separate projections."""
        if self.fast_qkv:
            qkv = self.qkv_linear(hidden_states)
            q, k, v = torch.chunk(qkv, 3, dim=-1)
            if q.ndimension() == self.q_bias.ndimension():
                q = q + self.q_bias
                v = v + self.v_bias
            else:
                # broadcast the biases over any extra leading dimensions
                _sz = (1,) * (q.ndimension() - 1) + (-1,)
                q = q + self.q_bias.view(*_sz)
                v = v + self.v_bias.view(*_sz)
        else:
            q = self.query(hidden_states)
            k = self.key(hidden_states)
            v = self.value(hidden_states)
        return q, k, v

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,  # accepted for API parity; not used below
        encoder_attention_mask=None,  # accepted for API parity; not used below
        past_key_value=None,  # accepted for API parity; not used below
        output_attentions=False,
        rel_pos=None,
        rel_2d_pos=None,
    ):
        q, k, v = self.compute_qkv(hidden_states)

        # (B, L, H*D) -> (B, H, L, D)
        query_layer = self.transpose_for_scores(q)
        key_layer = self.transpose_for_scores(k)
        value_layer = self.transpose_for_scores(v)

        query_layer = query_layer / math.sqrt(self.attention_head_size)
        # [BSZ, NAT, L, L]
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        # relative-position biases are added directly to the raw scores
        if self.has_relative_attention_bias:
            attention_scores += rel_pos
        if self.has_spatial_attention_bias:
            attention_scores += rel_2d_pos
        # positions where attention_mask is truthy are blocked (-inf before softmax)
        attention_scores = attention_scores.float().masked_fill_(attention_mask.to(torch.bool), float("-inf"))
        attention_probs = F.softmax(attention_scores, dim=-1, dtype=torch.float32).type_as(value_layer)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        return outputs
class LayoutLMv2Attention(nn.Module):
    """Self-attention plus its residual/LayerNorm output projection, with
    support for head pruning."""

    def __init__(self, config):
        super().__init__()
        self.self = LayoutLMv2SelfAttention(config)
        self.output = LayoutLMv2SelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        """Remove the given attention heads from the projections.

        NOTE(review): this accesses ``self.self.query/key/value``, which only
        exist when ``config.fast_qkv`` is False — confirm heads are never
        pruned in fast-QKV mode.
        """
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
        rel_pos=None,
        rel_2d_pos=None,
    ):
        """Run self-attention then the residual output block; returns
        ``(attention_output, [attention_probs])``."""
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
            rel_pos=rel_pos,
            rel_2d_pos=rel_2d_pos,
        )
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs
class LayoutLMv2Layer(nn.Module):
    """One transformer block: (self-)attention, optional cross-attention when
    configured as a decoder, then the chunked feed-forward sublayer."""

    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1  # chunking is applied along the sequence axis
        self.attention = LayoutLMv2Attention(config)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added"
            self.crossattention = LayoutLMv2Attention(config)
        self.intermediate = LayoutLMv2Intermediate(config)
        self.output = LayoutLMv2Output(config)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
        rel_pos=None,
        rel_2d_pos=None,
    ):
        """Apply attention (+ cross-attention if decoder) and feed-forward.

        Returns ``(layer_output, *attentions, [present_key_value])`` — the
        key/value cache tuple is appended only in decoder mode.
        """
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
            rel_pos=rel_pos,
            rel_2d_pos=rel_2d_pos,
        )
        attention_output = self_attention_outputs[0]

        # if decoder, the last output is tuple of self-attn cache
        if self.is_decoder:
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
        else:
            outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        cross_attn_present_key_value = None
        if self.is_decoder and encoder_hidden_states is not None:
            assert hasattr(
                self, "crossattention"
            ), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"

            # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                cross_attn_past_key_value,
                output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights

            # add cross-attn cache to positions 3,4 of present_key_value tuple
            cross_attn_present_key_value = cross_attention_outputs[-1]
            present_key_value = present_key_value + cross_attn_present_key_value

        # feed-forward is applied in sequence chunks to bound peak memory
        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs

        # if decoder, return the attn key/values as the last output
        if self.is_decoder:
            outputs = outputs + (present_key_value,)

        return outputs

    def feed_forward_chunk(self, attention_output):
        """Intermediate + output sublayer for one sequence chunk."""
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output
def relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
    """Map relative positions to T5-style bucket indices.

    Small distances (below half the available buckets) get one bucket each;
    larger distances share logarithmically sized buckets up to
    ``max_distance``.  When ``bidirectional``, half the buckets encode the
    sign of the offset; otherwise negative offsets are clamped to zero.
    """
    if bidirectional:
        # upper half of the buckets marks positive offsets
        half = num_buckets // 2
        bucket = (relative_position > 0).long() * half
        distance = torch.abs(relative_position)
        buckets_avail = half
    else:
        bucket = 0
        distance = torch.max(-relative_position, torch.zeros_like(relative_position))
        buckets_avail = num_buckets

    # exact buckets for distances below max_exact, log-spaced beyond
    max_exact = buckets_avail // 2
    log_bucket = max_exact + (
        torch.log(distance.float() / max_exact)
        / math.log(max_distance / max_exact)
        * (buckets_avail - max_exact)
    ).to(torch.long)
    log_bucket = torch.min(log_bucket, torch.full_like(log_bucket, buckets_avail - 1))

    return bucket + torch.where(distance < max_exact, distance, log_bucket)
class LayoutLMv2Encoder(nn.Module):
    """Stack of LayoutLMv2 transformer layers.

    Optionally computes shared 1-D (sequence) and 2-D (spatial/bbox) relative
    attention biases once per forward pass and feeds them to every layer.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([LayoutLMv2Layer(config) for _ in range(config.num_hidden_layers)])
        self.has_relative_attention_bias = config.has_relative_attention_bias
        self.has_spatial_attention_bias = config.has_spatial_attention_bias
        if self.has_relative_attention_bias:
            self.rel_pos_bins = config.rel_pos_bins
            self.max_rel_pos = config.max_rel_pos
            self.rel_pos_onehot_size = config.rel_pos_bins
            # Projects one-hot bucket ids to one scalar bias per attention head.
            self.rel_pos_bias = nn.Linear(self.rel_pos_onehot_size, config.num_attention_heads, bias=False)
        if self.has_spatial_attention_bias:
            self.max_rel_2d_pos = config.max_rel_2d_pos
            self.rel_2d_pos_bins = config.rel_2d_pos_bins
            self.rel_2d_pos_onehot_size = config.rel_2d_pos_bins
            # Separate learned biases for horizontal and vertical bbox offsets.
            self.rel_pos_x_bias = nn.Linear(self.rel_2d_pos_onehot_size, config.num_attention_heads, bias=False)
            self.rel_pos_y_bias = nn.Linear(self.rel_2d_pos_onehot_size, config.num_attention_heads, bias=False)

    def _cal_1d_pos_emb(self, hidden_states, position_ids):
        """Builds the per-head 1-D relative position bias, shape
        (batch, heads, seq, seq)."""
        # Pairwise signed distances between all sequence positions.
        rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1)
        rel_pos = relative_position_bucket(
            rel_pos_mat,
            num_buckets=self.rel_pos_bins,
            max_distance=self.max_rel_pos,
        )
        rel_pos = F.one_hot(rel_pos, num_classes=self.rel_pos_onehot_size).type_as(hidden_states)
        # (B, S, S, heads) -> (B, heads, S, S) to match attention score layout.
        rel_pos = self.rel_pos_bias(rel_pos).permute(0, 3, 1, 2)
        rel_pos = rel_pos.contiguous()
        return rel_pos

    def _cal_2d_pos_emb(self, hidden_states, bbox):
        """Builds the per-head 2-D (spatial) relative position bias from the
        bbox left-x and bottom-y coordinates; shape (batch, heads, seq, seq)."""
        position_coord_x = bbox[:, :, 0]
        position_coord_y = bbox[:, :, 3]
        rel_pos_x_2d_mat = position_coord_x.unsqueeze(-2) - position_coord_x.unsqueeze(-1)
        rel_pos_y_2d_mat = position_coord_y.unsqueeze(-2) - position_coord_y.unsqueeze(-1)
        rel_pos_x = relative_position_bucket(
            rel_pos_x_2d_mat,
            num_buckets=self.rel_2d_pos_bins,
            max_distance=self.max_rel_2d_pos,
        )
        rel_pos_y = relative_position_bucket(
            rel_pos_y_2d_mat,
            num_buckets=self.rel_2d_pos_bins,
            max_distance=self.max_rel_2d_pos,
        )
        rel_pos_x = F.one_hot(rel_pos_x, num_classes=self.rel_2d_pos_onehot_size).type_as(hidden_states)
        rel_pos_y = F.one_hot(rel_pos_y, num_classes=self.rel_2d_pos_onehot_size).type_as(hidden_states)
        rel_pos_x = self.rel_pos_x_bias(rel_pos_x).permute(0, 3, 1, 2)
        rel_pos_y = self.rel_pos_y_bias(rel_pos_y).permute(0, 3, 1, 2)
        rel_pos_x = rel_pos_x.contiguous()
        rel_pos_y = rel_pos_y.contiguous()
        # Axes contribute additively to the attention bias.
        rel_2d_pos = rel_pos_x + rel_pos_y
        return rel_2d_pos

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
        bbox=None,
        position_ids=None,
    ):
        """Runs all encoder layers; optionally collects hidden states,
        attentions, and decoder KV caches, returning either a tuple or a
        BaseModelOutputWithPastAndCrossAttentions."""
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None

        next_decoder_cache = () if use_cache else None

        # Relative biases are computed once here and shared by every layer.
        rel_pos = self._cal_1d_pos_emb(hidden_states, position_ids) if self.has_relative_attention_bias else None
        rel_2d_pos = self._cal_2d_pos_emb(hidden_states, bbox) if self.has_spatial_attention_bias else None

        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None

            if getattr(self.config, "gradient_checkpointing", False) and self.training:

                if use_cache:
                    # Checkpointing recomputes activations, which is incompatible
                    # with returning a KV cache.
                    logger.warn(
                        "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
                        "`use_cache=False`..."
                    )
                    use_cache = False

                def create_custom_forward(module):
                    # Closure captures past_key_value / output_attentions since
                    # checkpoint only forwards tensor inputs.
                    def custom_forward(*inputs):
                        return module(*inputs, past_key_value, output_attentions)

                    return custom_forward

                # NOTE(review): torch.utils.checkpoint.checkpoint is invoked with
                # keyword arguments (rel_pos=, rel_2d_pos=); older torch releases
                # do not forward kwargs through checkpoint — confirm against the
                # pinned torch version.
                layer_outputs = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(layer_module),
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    rel_pos=rel_pos,
                    rel_2d_pos=rel_2d_pos,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                    rel_pos=rel_pos,
                    rel_2d_pos=rel_2d_pos,
                )

            hidden_states = layer_outputs[0]
            if use_cache:
                # Per-layer KV cache is the last element of the layer output.
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            # Legacy tuple output: drop None entries, keep ordering.
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )
class LayoutLMv2PreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = LayoutLMv2Config
    pretrained_model_archive_map = LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST
    base_model_prefix = "layoutlmv2"
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def _init_weights(self, module):
        """Initializes one submodule's weights (called by `init_weights`)."""
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version, which uses
            # truncated_normal (cf https://github.com/pytorch/pytorch/pull/5617).
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                # The padding row must stay zero.
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, LayoutLMv2LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
def my_convert_sync_batchnorm(module, process_group=None):
# same as `nn.modules.SyncBatchNorm.convert_sync_batchnorm` but allowing converting from `detectron2.layers.FrozenBatchNorm2d`
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
return nn.modules.SyncBatchNorm.convert_sync_batchnorm(module, process_group)
module_output = module
if isinstance(module, detectron2.layers.FrozenBatchNorm2d):
module_output = torch.nn.SyncBatchNorm(
num_features=module.num_features,
eps=module.eps,
affine=True,
track_running_stats=True,
process_group=process_group,
)
module_output.weight = torch.nn.Parameter(module.weight)
module_output.bias = torch.nn.Parameter(module.bias)
module_output.running_mean = module.running_mean
module_output.running_var = module.running_var
module_output.num_batches_tracked = torch.tensor(0, dtype=torch.long, device=module.running_mean.device)
for name, child in module.named_children():
module_output.add_module(name, my_convert_sync_batchnorm(child, process_group))
del module
return module_output
class VisualBackbone(nn.Module):
    """Detectron2 FPN backbone that turns an input image into a fixed-size grid
    of pooled patch features for LayoutLMv2."""

    def __init__(self, config):
        super().__init__()
        self.cfg = detectron2.config.get_cfg()
        add_layoutlmv2_config(self.cfg)
        meta_arch = self.cfg.MODEL.META_ARCHITECTURE
        model = META_ARCH_REGISTRY.get(meta_arch)(self.cfg)
        assert isinstance(model.backbone, detectron2.modeling.backbone.FPN)
        # Only the FPN backbone is kept; the detection heads are discarded.
        self.backbone = model.backbone

        if (
            config.convert_sync_batchnorm
            and torch.distributed.is_available()
            and torch.distributed.is_initialized()
            and torch.distributed.get_rank() > -1
        ):
            # Build one SyncBatchNorm process group per node so BN statistics
            # are synchronized only across GPUs on the same machine.
            self_rank = torch.distributed.get_rank()
            node_size = torch.cuda.device_count()
            world_size = torch.distributed.get_world_size()
            assert world_size % node_size == 0

            node_global_ranks = [
                list(range(i * node_size, (i + 1) * node_size)) for i in range(world_size // node_size)
            ]
            sync_bn_groups = [
                torch.distributed.new_group(ranks=node_global_ranks[i]) for i in range(world_size // node_size)
            ]
            node_rank = self_rank // node_size
            assert self_rank in node_global_ranks[node_rank]

            self.backbone = my_convert_sync_batchnorm(self.backbone, process_group=sync_bn_groups[node_rank])

        assert len(self.cfg.MODEL.PIXEL_MEAN) == len(self.cfg.MODEL.PIXEL_STD)
        num_channels = len(self.cfg.MODEL.PIXEL_MEAN)
        # Registered as buffers so they follow .to()/.cuda() but are untrained.
        self.register_buffer(
            "pixel_mean",
            torch.Tensor(self.cfg.MODEL.PIXEL_MEAN).view(num_channels, 1, 1),
        )
        self.register_buffer("pixel_std", torch.Tensor(self.cfg.MODEL.PIXEL_STD).view(num_channels, 1, 1))
        self.out_feature_key = "p2"
        # NOTE(review): `torch.is_deterministic()` was deprecated and later
        # removed in newer PyTorch releases (replaced by
        # `torch.are_deterministic_algorithms_enabled()`) — confirm against the
        # pinned torch version.
        if torch.is_deterministic():
            logger.warning("using `AvgPool2d` instead of `AdaptiveAvgPool2d`")
            # AdaptiveAvgPool2d lacks a deterministic backward; emulate it with
            # a fixed AvgPool2d. NOTE(review): this assumes 224x224 inputs.
            input_shape = (224, 224)
            backbone_stride = self.backbone.output_shape()[self.out_feature_key].stride
            self.pool = nn.AvgPool2d(
                (
                    math.ceil(math.ceil(input_shape[0] / backbone_stride) / config.image_feature_pool_shape[0]),
                    math.ceil(math.ceil(input_shape[1] / backbone_stride) / config.image_feature_pool_shape[1]),
                )
            )
        else:
            self.pool = nn.AdaptiveAvgPool2d(config.image_feature_pool_shape[:2])
        if len(config.image_feature_pool_shape) == 2:
            # Record the backbone channel count as the third pool-shape entry.
            config.image_feature_pool_shape.append(self.backbone.output_shape()[self.out_feature_key].channels)
        assert self.backbone.output_shape()[self.out_feature_key].channels == config.image_feature_pool_shape[2]

    def forward(self, images):
        """Returns pooled patch features, shape (batch, H*W, channels)."""
        images_input = ((images if torch.is_tensor(images) else images.tensor) - self.pixel_mean) / self.pixel_std
        features = self.backbone(images_input)
        features = features[self.out_feature_key]
        # (B, C, H, W) -> pooled grid -> (B, H*W, C)
        features = self.pool(features).flatten(start_dim=2).transpose(1, 2).contiguous()
        return features
class LayoutLMv2Model(LayoutLMv2PreTrainedModel):
    """LayoutLMv2 backbone fusing text, layout (bounding boxes) and image
    features.

    Text tokens are embedded together with their 1-D and 2-D (bbox) positions;
    the image is encoded by a detectron2 backbone, pooled into a fixed grid of
    visual tokens with synthetic grid bboxes. Both streams are concatenated
    along the sequence axis and run through a single transformer encoder.
    """

    def __init__(self, config):
        super(LayoutLMv2Model, self).__init__(config)
        self.config = config
        self.has_visual_segment_embedding = config.has_visual_segment_embedding
        self.embeddings = LayoutLMv2Embeddings(config)

        self.visual = VisualBackbone(config)
        # Maps pooled backbone channels to the transformer hidden size.
        self.visual_proj = nn.Linear(config.image_feature_pool_shape[-1], config.hidden_size)
        if self.has_visual_segment_embedding:
            # Single learned embedding added to every visual token.
            self.visual_segment_embedding = nn.Parameter(nn.Embedding(1, config.hidden_size).weight[0])
        self.visual_LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.visual_dropout = nn.Dropout(config.hidden_dropout_prob)

        self.encoder = LayoutLMv2Encoder(config)
        self.pooler = LayoutLMv2Pooler(config)

        self.init_weights()

    def get_input_embeddings(self):
        """Returns the word embedding table (used for tying/resizing)."""
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        """Replaces the word embedding table."""
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    def _calc_text_embeddings(self, input_ids, bbox, position_ids, token_type_ids):
        """Embeds text tokens: word + 1-D position + 2-D (bbox) + segment,
        followed by LayerNorm and dropout."""
        seq_length = input_ids.size(1)
        if position_ids is None:
            position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)

        words_embeddings = self.embeddings.word_embeddings(input_ids)
        position_embeddings = self.embeddings.position_embeddings(position_ids)
        spatial_position_embeddings = self.embeddings._cal_spatial_position_embeddings(bbox)
        token_type_embeddings = self.embeddings.token_type_embeddings(token_type_ids)

        embeddings = words_embeddings + position_embeddings + spatial_position_embeddings + token_type_embeddings
        embeddings = self.embeddings.LayerNorm(embeddings)
        embeddings = self.embeddings.dropout(embeddings)
        return embeddings

    def _calc_img_embeddings(self, image, bbox, position_ids):
        """Embeds visual tokens: projected backbone features + 1-D position +
        2-D (synthetic grid bbox) embeddings."""
        visual_embeddings = self.visual_proj(self.visual(image))
        position_embeddings = self.embeddings.position_embeddings(position_ids)
        spatial_position_embeddings = self.embeddings._cal_spatial_position_embeddings(bbox)
        embeddings = visual_embeddings + position_embeddings + spatial_position_embeddings
        if self.has_visual_segment_embedding:
            embeddings += self.visual_segment_embedding
        embeddings = self.visual_LayerNorm(embeddings)
        embeddings = self.visual_dropout(embeddings)
        return embeddings

    def forward(
        self,
        input_ids=None,
        bbox=None,
        image=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """Runs the text+visual encoder.

        Returns a (sequence_output, pooled_output, ...) tuple when
        `return_dict` is falsy, otherwise a
        BaseModelOutputWithPoolingAndCrossAttentions.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        # Bug fix: default `bbox` *before* it is first used. The original code
        # read `bbox.dtype` when building the visual grid boxes below and only
        # checked `bbox is None` afterwards, so the documented zero-box
        # fallback could never take effect (it crashed with AttributeError
        # instead).
        if bbox is None:
            bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device)

        # Visual tokens form an H x W grid appended after the text sequence.
        visual_shape = list(input_shape)
        visual_shape[1] = self.config.image_feature_pool_shape[0] * self.config.image_feature_pool_shape[1]
        visual_shape = torch.Size(visual_shape)
        final_shape = list(input_shape)
        final_shape[1] += visual_shape[1]
        final_shape = torch.Size(final_shape)

        # Synthetic bboxes for the visual grid, on the usual 0-1000 layout scale.
        visual_bbox_x = (
            torch.arange(
                0,
                1000 * (self.config.image_feature_pool_shape[1] + 1),
                1000,
                device=device,
                dtype=bbox.dtype,
            )
            // self.config.image_feature_pool_shape[1]
        )
        visual_bbox_y = (
            torch.arange(
                0,
                1000 * (self.config.image_feature_pool_shape[0] + 1),
                1000,
                device=device,
                dtype=bbox.dtype,
            )
            // self.config.image_feature_pool_shape[0]
        )
        visual_bbox = torch.stack(
            [
                visual_bbox_x[:-1].repeat(self.config.image_feature_pool_shape[0], 1),
                visual_bbox_y[:-1].repeat(self.config.image_feature_pool_shape[1], 1).transpose(0, 1),
                visual_bbox_x[1:].repeat(self.config.image_feature_pool_shape[0], 1),
                visual_bbox_y[1:].repeat(self.config.image_feature_pool_shape[1], 1).transpose(0, 1),
            ],
            dim=-1,
        ).view(-1, bbox.size(-1))
        visual_bbox = visual_bbox.repeat(final_shape[0], 1, 1)
        final_bbox = torch.cat([bbox, visual_bbox], dim=1)

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        # Visual tokens are always attended to.
        visual_attention_mask = torch.ones(visual_shape, device=device)
        final_attention_mask = torch.cat([attention_mask, visual_attention_mask], dim=1)

        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        if position_ids is None:
            seq_length = input_shape[1]
            position_ids = self.embeddings.position_ids[:, :seq_length]
            # NOTE(review): this assumes input_ids is not None when
            # position_ids is omitted (inputs_embeds-only calls would crash
            # here) — confirm intended usage.
            position_ids = position_ids.expand_as(input_ids)

        visual_position_ids = torch.arange(0, visual_shape[1], dtype=torch.long, device=device).repeat(
            input_shape[0], 1
        )
        final_position_ids = torch.cat([position_ids, visual_position_ids], dim=1)

        text_layout_emb = self._calc_text_embeddings(
            input_ids=input_ids,
            bbox=bbox,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
        )

        visual_emb = self._calc_img_embeddings(
            image=image,
            bbox=visual_bbox,
            position_ids=visual_position_ids,
        )
        final_emb = torch.cat([text_layout_emb, visual_emb], dim=1)

        # Standard additive attention mask: 0 where attended, -10000 elsewhere.
        extended_attention_mask = final_attention_mask.unsqueeze(1).unsqueeze(2)

        extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0

        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
            head_mask = head_mask.to(dtype=next(self.parameters()).dtype)
        else:
            head_mask = [None] * self.config.num_hidden_layers

        encoder_outputs = self.encoder(
            final_emb,
            extended_attention_mask,
            bbox=final_bbox,
            position_ids=final_position_ids,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndCrossAttentions(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )
class LayoutLMv2ForTokenClassification(LayoutLMv2PreTrainedModel):
    """LayoutLMv2 with a per-token classification head (e.g. for semantic
    entity labeling / NER on documents)."""

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.layoutlmv2 = LayoutLMv2Model(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()

    def get_input_embeddings(self):
        """Returns the backbone's word embedding table."""
        return self.layoutlmv2.embeddings.word_embeddings

    def forward(
        self,
        input_ids=None,
        bbox=None,
        image=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        """Classifies every text token; computes a masked cross-entropy loss
        when `labels` is given."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.layoutlmv2(
            input_ids=input_ids,
            bbox=bbox,
            image=image,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        # The backbone appends visual tokens after the text; classify text only.
        text_length = input_ids.size(1)
        sequence_output = outputs[0][:, :text_length]
        image_output = outputs[0][:, text_length:]

        logits = self.classifier(self.dropout(sequence_output))

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            flat_logits = logits.view(-1, self.num_labels)
            flat_labels = labels.view(-1)
            if attention_mask is not None:
                # Restrict the loss to non-padding positions.
                keep = attention_mask.view(-1) == 1
                loss = loss_fct(flat_logits[keep], flat_labels[keep])
            else:
                loss = loss_fct(flat_logits, flat_labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
class LayoutLMv2ForRelationExtraction(LayoutLMv2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.layoutlmv2 = LayoutLMv2Model(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.decoder = config.decoder_name
self.decoder = config.decoder_name
if self.decoder == 're':
self.extractor = REDecoder(config, config.hidden_size)
elif self.decoder == 'gose':
self.extractor = GOSE(config)
self.init_weights()
def forward(
self,
input_ids,
bbox,
labels=None,
image=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
entities=None,
relations=None,
):
outputs = self.layoutlmv2(
input_ids=input_ids,
bbox=bbox,
image=image,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
)
seq_length = input_ids.size(1)
sequence_output, image_output = outputs[0][:, :seq_length], outputs[0][:, seq_length:]
sequence_output = self.dropout(sequence_output)
loss, pred_relations = self.extractor(sequence_output, entities, relations, bbox)
| return ReOutput( | 0 | 2023-10-19 14:36:32+00:00 | 12k |
mklissa/dceo | dopamine/discrete_domains/run_experiment.py | [
{
"identifier": "dqn_agent",
"path": "dopamine/agents/dqn/dqn_agent.py",
"snippet": "NATURE_DQN_OBSERVATION_SHAPE = atari_lib.NATURE_DQN_OBSERVATION_SHAPE\nNATURE_DQN_DTYPE = atari_lib.NATURE_DQN_DTYPE\nNATURE_DQN_STACK_SIZE = atari_lib.NATURE_DQN_STACK_SIZE\ndef linearly_decaying_epsilon(decay_period, ... | import os
import sys
import time
import gin.tf
import numpy as np
import tensorflow as tf
from absl import logging
from dopamine.agents.dqn import dqn_agent
from dopamine.agents.implicit_quantile import implicit_quantile_agent
from dopamine.agents.rainbow import rainbow_agent
from dopamine.discrete_domains import atari_lib
from dopamine.discrete_domains import checkpointer
from dopamine.discrete_domains import iteration_statistics
from dopamine.discrete_domains import logger
from dopamine.jax.agents.dqn import dqn_agent as jax_dqn_agent
from dopamine.jax.agents.full_rainbow import full_rainbow_agent
from dopamine.jax.agents.full_rainbow import full_rainbow_dceo
from dopamine.jax.agents.implicit_quantile import implicit_quantile_agent as jax_implicit_quantile_agent
from dopamine.jax.agents.quantile import quantile_agent as jax_quantile_agent
from dopamine.jax.agents.rainbow import rainbow_agent as jax_rainbow_agent
from dopamine.metrics import collector_dispatcher
from dopamine.metrics import statistics_instance | 7,802 | self._end_episode(reward, is_terminal)
action = self._agent.begin_episode(observation)
else:
action = self._agent.step(reward, observation)
self._end_episode(reward, is_terminal)
return step_number, total_reward
def _run_one_phase(self, min_steps, statistics, run_mode_str):
"""Runs the agent/environment loop until a desired number of steps.
We follow the Machado et al., 2017 convention of running full episodes,
and terminating once we've run a minimum number of steps.
Args:
min_steps: int, minimum number of steps to generate in this phase.
statistics: `IterationStatistics` object which records the experimental
results.
run_mode_str: str, describes the run mode for this agent.
Returns:
Tuple containing the number of steps taken in this phase (int), the sum of
returns (float), and the number of episodes performed (int).
"""
step_count = 0
num_episodes = 0
sum_returns = 0.
while step_count < min_steps:
episode_length, episode_return = self._run_one_episode()
statistics.append({
'{}_episode_lengths'.format(run_mode_str): episode_length,
'{}_episode_returns'.format(run_mode_str): episode_return
})
step_count += episode_length
sum_returns += episode_return
num_episodes += 1
if self._fine_grained_print_to_console:
# We use sys.stdout.write instead of logging so as to flush frequently
# without generating a line break.
sys.stdout.write('Steps executed: {} '.format(step_count) +
'Episode length: {} '.format(episode_length) +
'Return: {}\r'.format(episode_return))
sys.stdout.flush()
return step_count, sum_returns, num_episodes
def _run_train_phase(self, statistics):
"""Run training phase.
Args:
statistics: `IterationStatistics` object which records the experimental
results. Note - This object is modified by this method.
Returns:
num_episodes: int, The number of episodes run in this phase.
average_reward: float, The average reward generated in this phase.
average_steps_per_second: float, The average number of steps per second.
"""
# Perform the training phase, during which the agent learns.
self._agent.eval_mode = False
start_time = time.time()
number_steps, sum_returns, num_episodes = self._run_one_phase(
self._training_steps, statistics, 'train')
average_return = sum_returns / num_episodes if num_episodes > 0 else 0.0
statistics.append({'train_average_return': average_return})
time_delta = time.time() - start_time
average_steps_per_second = number_steps / time_delta
statistics.append(
{'train_average_steps_per_second': average_steps_per_second})
logging.info('Average undiscounted return per training episode: %.2f',
average_return)
logging.info('Average training steps per second: %.2f',
average_steps_per_second)
return num_episodes, average_return, average_steps_per_second
def _run_eval_phase(self, statistics):
"""Run evaluation phase.
Args:
statistics: `IterationStatistics` object which records the experimental
results. Note - This object is modified by this method.
Returns:
num_episodes: int, The number of episodes run in this phase.
average_reward: float, The average reward generated in this phase.
"""
# Perform the evaluation phase -- no learning.
self._agent.eval_mode = True
_, sum_returns, num_episodes = self._run_one_phase(
self._evaluation_steps, statistics, 'eval')
average_return = sum_returns / num_episodes if num_episodes > 0 else 0.0
logging.info('Average undiscounted return per evaluation episode: %.2f',
average_return)
statistics.append({'eval_average_return': average_return})
return num_episodes, average_return
def _run_one_iteration(self, iteration):
"""Runs one iteration of agent/environment interaction.
An iteration involves running several episodes until a certain number of
steps are obtained. The interleaving of train/eval phases implemented here
are to match the implementation of (Mnih et al., 2015).
Args:
iteration: int, current iteration number, used as a global_step for saving
Tensorboard summaries.
Returns:
A dict containing summary statistics for this iteration.
"""
statistics = iteration_statistics.IterationStatistics()
logging.info('Starting iteration %d', iteration)
num_episodes_train, average_reward_train, average_steps_per_second = (
self._run_train_phase(statistics))
num_episodes_eval, average_reward_eval = self._run_eval_phase(
statistics)
if self._has_collector_dispatcher:
self._collector_dispatcher.write([
| # coding=utf-8
# Copyright 2018 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module defining classes and helper methods for general agents."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def load_gin_configs(gin_files, gin_bindings):
  """Parses the given gin config files and applies the parameter overrides.

  Args:
    gin_files: list, of paths to the gin configuration files for this
      experiment.
    gin_bindings: list, of gin parameter bindings to override the values in
      the config files.
  """
  # skip_unknown=False: fail loudly on configurables that are not registered.
  gin.parse_config_files_and_bindings(
      gin_files, bindings=gin_bindings, skip_unknown=False)
@gin.configurable
def create_agent(sess, environment, agent_name=None, summary_writer=None,
                 debug_mode=False):
  """Instantiates the RL agent named by `agent_name`.

  Args:
    sess: A `tf.compat.v1.Session` object for running associated ops.
    environment: A gym environment (e.g. Atari 2600).
    agent_name: str, name of the agent to create.
    summary_writer: A Tensorflow summary writer to pass to the agent
      for in-agent training statistics in Tensorboard.
    debug_mode: bool, whether to output Tensorboard summaries. If set to true,
      the agent will output in-episode statistics to Tensorboard. Disabled by
      default as this results in slower training.

  Returns:
    agent: An RL agent.

  Raises:
    ValueError: If `agent_name` is not in supported list.
  """
  assert agent_name is not None
  if not debug_mode:
    summary_writer = None
  num_actions = environment.action_space.n
  # Any name with the 'dqn' prefix maps to the classic TF DQN agent.
  if agent_name.startswith('dqn'):
    return dqn_agent.DQNAgent(sess, num_actions=num_actions,
                              summary_writer=summary_writer)
  if agent_name == 'rainbow':
    return rainbow_agent.RainbowAgent(
        sess, num_actions=num_actions, summary_writer=summary_writer)
  if agent_name == 'implicit_quantile':
    return implicit_quantile_agent.ImplicitQuantileAgent(
        sess, num_actions=num_actions, summary_writer=summary_writer)
  # JAX agents do not take a session.
  if agent_name == 'jax_dqn':
    return jax_dqn_agent.JaxDQNAgent(num_actions=num_actions,
                                     summary_writer=summary_writer)
  if agent_name == 'jax_quantile':
    return jax_quantile_agent.JaxQuantileAgent(
        num_actions=num_actions, summary_writer=summary_writer)
  if agent_name == 'jax_rainbow':
    return jax_rainbow_agent.JaxRainbowAgent(
        num_actions=num_actions, summary_writer=summary_writer)
  if agent_name == 'full_rainbow':
    return full_rainbow_agent.JaxFullRainbowAgent(
        num_actions=num_actions, summary_writer=summary_writer)
  if agent_name == 'full_rainbow_dceo':
    return full_rainbow_dceo.JaxFullRainbowAgentDCEO(
        num_actions=num_actions, summary_writer=summary_writer)
  if agent_name == 'jax_implicit_quantile':
    return jax_implicit_quantile_agent.JaxImplicitQuantileAgent(
        num_actions=num_actions, summary_writer=summary_writer)
  raise ValueError('Unknown agent: {}'.format(agent_name))
@gin.configurable
def create_runner(base_dir, schedule='continuous_train_and_eval'):
  """Creates an experiment Runner.

  Args:
    base_dir: str, base directory for hosting all subdirectories.
    schedule: string, which type of Runner to use.

  Returns:
    runner: A `Runner` like object.

  Raises:
    ValueError: When an unknown schedule is encountered.
  """
  assert base_dir is not None
  runner_classes = {
      # Continuously runs training and evaluation until max num_iterations.
      'continuous_train_and_eval': Runner,
      # Continuously runs training only until max num_iterations.
      'continuous_train': TrainRunner,
  }
  if schedule not in runner_classes:
    raise ValueError('Unknown schedule: {}'.format(schedule))
  return runner_classes[schedule](base_dir, create_agent)
@gin.configurable
class Runner(object):
"""Object that handles running Dopamine experiments.
Here we use the term 'experiment' to mean simulating interactions between the
agent and the environment and reporting some statistics pertaining to these
interactions.
A simple scenario to train a DQN agent is as follows:
```python
import dopamine.discrete_domains.atari_lib
base_dir = '/tmp/simple_example'
def create_agent(sess, environment):
return dqn_agent.DQNAgent(sess, num_actions=environment.action_space.n)
runner = Runner(base_dir, create_agent, atari_lib.create_atari_environment)
runner.run()
```
"""
def __init__(self,
             base_dir,
             create_agent_fn,
             create_environment_fn=atari_lib.create_atari_environment,
             checkpoint_file_prefix='ckpt',
             logging_file_prefix='log',
             log_every_n=1,
             num_iterations=200,
             training_steps=250000,
             evaluation_steps=125000,
             max_steps_per_episode=27000,
             clip_rewards=True,
             use_legacy_logger=True,
             fine_grained_print_to_console=True):
  """Initialize the Runner object in charge of running a full experiment.

  Args:
    base_dir: str, the base directory to host all required sub-directories.
    create_agent_fn: A function that takes as args a Tensorflow session and an
      environment, and returns an agent.
    create_environment_fn: A function which receives a problem name and
      creates a Gym environment for that problem (e.g. an Atari 2600 game).
    checkpoint_file_prefix: str, the prefix to use for checkpoint files.
    logging_file_prefix: str, prefix to use for the log files.
    log_every_n: int, the frequency for writing logs.
    num_iterations: int, the iteration number threshold (must be greater than
      start_iteration).
    training_steps: int, the number of training steps to perform.
    evaluation_steps: int, the number of evaluation steps to perform.
    max_steps_per_episode: int, maximum number of steps after which an episode
      terminates.
    clip_rewards: bool, whether to clip rewards in [-1, 1].
    use_legacy_logger: bool, whether to use the legacy Logger. This will be
      deprecated soon, replaced with the new CollectorDispatcher setup.
    fine_grained_print_to_console: bool, whether to print fine-grained
      progress to console (useful for debugging).

  This constructor will take the following actions:
  - Initialize an environment.
  - Initialize a `tf.compat.v1.Session`.
  - Initialize a logger.
  - Initialize an agent.
  - Reload from the latest checkpoint, if available, and initialize the
    Checkpointer object.
  """
  assert base_dir is not None
  self._legacy_logger_enabled = use_legacy_logger
  self._fine_grained_print_to_console_enabled = fine_grained_print_to_console
  self._logging_file_prefix = logging_file_prefix
  self._log_every_n = log_every_n
  self._num_iterations = num_iterations
  self._training_steps = training_steps
  self._evaluation_steps = evaluation_steps
  self._max_steps_per_episode = max_steps_per_episode
  self._base_dir = base_dir
  self._clip_rewards = clip_rewards
  # Directories must exist before the agent/logger try to write into them.
  self._create_directories()

  self._environment = create_environment_fn()
  # The agent is now in charge of setting up the session.
  self._sess = None
  # We're using a bit of a hack in that we pass in _base_dir instead of an
  # actually SummaryWriter. This is because the agent is now in charge of the
  # session, but needs to create the SummaryWriter before creating the ops,
  # and in order to do so, it requires the base directory.
  self._agent = create_agent_fn(self._sess, self._environment,
                                summary_writer=self._base_dir)
  if hasattr(self._agent, '_sess'):
    # Adopt the agent-owned session so checkpointing can use it.
    self._sess = self._agent._sess
  self._summary_writer = self._agent.summary_writer

  self._initialize_checkpointer_and_maybe_resume(checkpoint_file_prefix)

  # Create a collector dispatcher for metrics reporting.
  self._collector_dispatcher = collector_dispatcher.CollectorDispatcher(
      self._base_dir)
  # Duck-typed hand-off: only agents that implement
  # set_collector_dispatcher receive the dispatcher.
  set_collector_dispatcher_fn = getattr(
      self._agent, 'set_collector_dispatcher', None)
  if callable(set_collector_dispatcher_fn):
    set_collector_dispatcher_fn(self._collector_dispatcher)
@property
def _use_legacy_logger(self):
    """Whether the deprecated legacy `Logger` is in use (defaults to True)."""
    # Checkpoints created before the flag existed lack the attribute; treat
    # a missing attribute as legacy behaviour.
    return getattr(self, '_legacy_logger_enabled', True)
@property
def _has_collector_dispatcher(self):
    """True iff a `CollectorDispatcher` has been attached to this runner."""
    # Runners restored from pre-dispatcher checkpoints will not carry it.
    return hasattr(self, '_collector_dispatcher')
@property
def _fine_grained_print_to_console(self):
    """Whether per-episode progress is written to stdout (defaults to True)."""
    return getattr(self, '_fine_grained_print_to_console_enabled', True)
def _create_directories(self):
    """Compute the checkpoint directory and, if enabled, build the legacy logger."""
    self._checkpoint_dir = os.path.join(self._base_dir, 'checkpoints')
    if not self._use_legacy_logger:
        return
    # The legacy Logger is kept only for backward compatibility.
    logging.warning(
        'DEPRECATION WARNING: Logger is being deprecated. '
        'Please switch to CollectorDispatcher!')
    self._logger = logger.Logger(os.path.join(self._base_dir, 'logs'))
def _initialize_checkpointer_and_maybe_resume(self, checkpoint_file_prefix):
    """Reloads the latest checkpoint if it exists.

    Creates a `Checkpointer`, asks it for the latest valid checkpoint number
    in self._checkpoint_dir, and — when one is found — loads the bundled data
    and hands it to the agent to restore its own state. If the agent unbundles
    successfully and the bundle carries bookkeeping data, the legacy logger's
    data and the resume iteration are restored from it.

    This method has no return value; its results are recorded on `self`:
      - self._checkpointer: the `Checkpointer` object for the experiment.
      - self._start_iteration: int, iteration to start the experiment from
        (0 when no valid checkpoint is found).

    Args:
        checkpoint_file_prefix: str, the checkpoint file prefix.
    """
    self._checkpointer = checkpointer.Checkpointer(self._checkpoint_dir,
                                                   checkpoint_file_prefix)
    self._start_iteration = 0
    # Check if checkpoint exists. Note that the existence of checkpoint 0 means
    # that we have finished iteration 0 (so we will start from iteration 1).
    latest_checkpoint_version = checkpointer.get_latest_checkpoint_number(
        self._checkpoint_dir)
    if latest_checkpoint_version >= 0:
        experiment_data = self._checkpointer.load_checkpoint(
            latest_checkpoint_version)
        # `unbundle` lets the agent restore its own internals; only on success
        # do we trust the bundled bookkeeping below.
        if self._agent.unbundle(
                self._checkpoint_dir, latest_checkpoint_version, experiment_data):
            if experiment_data is not None:
                assert 'logs' in experiment_data
                assert 'current_iteration' in experiment_data
                if self._use_legacy_logger:
                    self._logger.data = experiment_data['logs']
                # Checkpoint N means iteration N finished, so resume at N + 1.
                self._start_iteration = experiment_data['current_iteration'] + 1
            logging.info('Reloaded checkpoint and will start from iteration %d',
                         self._start_iteration)
def _initialize_episode(self):
    """Reset the environment and ask the agent for the episode's first action.

    Returns:
        int, the initial action chosen by the agent.
    """
    return self._agent.begin_episode(self._environment.reset())
def _run_one_step(self, action):
    """Apply `action` to the environment for a single transition.

    Args:
        action: int, the action to perform in the environment.

    Returns:
        Tuple of (observation, reward, is_terminal) from the environment.
        The trailing info dict produced by `step` is discarded.
    """
    transition = self._environment.step(action)
    return transition[0], transition[1], transition[2]
def _end_episode(self, reward, terminal=True):
    """Hand the final reward (and terminal flag, when supported) to the agent.

    Args:
        reward: float, the last reward from the environment.
        terminal: bool, whether the last state-action led to a terminal state.
    """
    if not isinstance(self._agent, jax_dqn_agent.JaxDQNAgent):
        # TODO(joshgreaves): Add terminal signal to TF dopamine agents
        self._agent.end_episode(reward)
    else:
        # Only the JAX agents currently accept the terminal signal.
        self._agent.end_episode(reward, terminal)
def _run_one_episode(self):
"""Executes a full trajectory of the agent interacting with the environment.
Returns:
The number of steps taken and the total reward.
"""
step_number = 0
total_reward = 0.
action = self._initialize_episode()
is_terminal = False
# Keep interacting until we reach a terminal state.
while True:
observation, reward, is_terminal = self._run_one_step(action)
total_reward += reward
step_number += 1
if self._clip_rewards:
# Perform reward clipping.
reward = np.clip(reward, -1, 1)
if (self._environment.game_over or
step_number == self._max_steps_per_episode):
# Stop the run loop once we reach the true end of episode.
break
elif is_terminal:
# If we lose a life but the episode is not over, signal an artificial
# end of episode to the agent.
self._end_episode(reward, is_terminal)
action = self._agent.begin_episode(observation)
else:
action = self._agent.step(reward, observation)
self._end_episode(reward, is_terminal)
return step_number, total_reward
def _run_one_phase(self, min_steps, statistics, run_mode_str):
    """Run whole episodes until at least `min_steps` environment steps elapse.

    Follows the Machado et al., 2017 convention of always completing the
    episode in progress once the minimum step budget has been reached.

    Args:
        min_steps: int, minimum number of steps to generate in this phase.
        statistics: `IterationStatistics` object which records the
            experimental results.
        run_mode_str: str, describes the run mode for this agent.

    Returns:
        Tuple containing the number of steps taken in this phase (int), the
        sum of returns (float), and the number of episodes performed (int).
    """
    steps = 0
    episodes = 0
    returns = 0.
    while steps < min_steps:
        length, ep_return = self._run_one_episode()
        statistics.append({
            '{}_episode_lengths'.format(run_mode_str): length,
            '{}_episode_returns'.format(run_mode_str): ep_return
        })
        steps += length
        returns += ep_return
        episodes += 1
        if self._fine_grained_print_to_console:
            # sys.stdout.write with '\r' (no newline) keeps the progress on a
            # single console line and lets us flush frequently.
            sys.stdout.write('Steps executed: {} '.format(steps) +
                             'Episode length: {} '.format(length) +
                             'Return: {}\r'.format(ep_return))
            sys.stdout.flush()
    return steps, returns, episodes
def _run_train_phase(self, statistics):
    """Run the training phase, during which the agent learns.

    Args:
        statistics: `IterationStatistics` object which records the
            experimental results. Note - This object is modified by this
            method.

    Returns:
        num_episodes: int, the number of episodes run in this phase.
        average_reward: float, the average reward generated in this phase.
        average_steps_per_second: float, the average number of steps per
            second.
    """
    self._agent.eval_mode = False  # Enable learning for this phase.
    phase_start = time.time()
    number_steps, sum_returns, num_episodes = self._run_one_phase(
        self._training_steps, statistics, 'train')
    # Guard against a phase that produced no completed episodes.
    if num_episodes > 0:
        average_return = sum_returns / num_episodes
    else:
        average_return = 0.0
    statistics.append({'train_average_return': average_return})
    average_steps_per_second = number_steps / (time.time() - phase_start)
    statistics.append(
        {'train_average_steps_per_second': average_steps_per_second})
    logging.info('Average undiscounted return per training episode: %.2f',
                 average_return)
    logging.info('Average training steps per second: %.2f',
                 average_steps_per_second)
    return num_episodes, average_return, average_steps_per_second
def _run_eval_phase(self, statistics):
    """Run the evaluation phase, with agent learning disabled.

    Args:
        statistics: `IterationStatistics` object which records the
            experimental results. Note - This object is modified by this
            method.

    Returns:
        num_episodes: int, the number of episodes run in this phase.
        average_reward: float, the average reward generated in this phase.
    """
    self._agent.eval_mode = True  # No learning during evaluation.
    _, sum_returns, num_episodes = self._run_one_phase(
        self._evaluation_steps, statistics, 'eval')
    # Guard against a phase that produced no completed episodes.
    if num_episodes > 0:
        average_return = sum_returns / num_episodes
    else:
        average_return = 0.0
    logging.info('Average undiscounted return per evaluation episode: %.2f',
                 average_return)
    statistics.append({'eval_average_return': average_return})
    return num_episodes, average_return
def _run_one_iteration(self, iteration):
"""Runs one iteration of agent/environment interaction.
An iteration involves running several episodes until a certain number of
steps are obtained. The interleaving of train/eval phases implemented here
are to match the implementation of (Mnih et al., 2015).
Args:
iteration: int, current iteration number, used as a global_step for saving
Tensorboard summaries.
Returns:
A dict containing summary statistics for this iteration.
"""
statistics = iteration_statistics.IterationStatistics()
logging.info('Starting iteration %d', iteration)
num_episodes_train, average_reward_train, average_steps_per_second = (
self._run_train_phase(statistics))
num_episodes_eval, average_reward_eval = self._run_eval_phase(
statistics)
if self._has_collector_dispatcher:
self._collector_dispatcher.write([ | statistics_instance.StatisticsInstance('Train/NumEpisodes', | 14 | 2023-10-15 22:14:16+00:00 | 12k |
BurgerBurgerBurger/AA | run.py | [
{
"identifier": "add_args",
"path": "args.py",
"snippet": "def add_args(parser):\n parser.add_argument(\"--do_train\", action=\"store_true\")\n parser.add_argument(\"--data_dir\", default=\"./dataset/docred\", type=str)\n parser.add_argument(\"--transformer_type\", default=\"bert\", type=str)\n... | import argparse
import os
import numpy as np
import torch
import ujson as json
import pandas as pd
import pickle
from torch.cuda.amp import GradScaler
from torch.utils.data import DataLoader
from transformers import AutoConfig, AutoModel, AutoTokenizer
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
from args import add_args
from model import DocREModel
from utils import set_seed, collate_fn, create_directory
from prepro import read_docred
from evaluation import to_official, official_evaluate, merge_results
from tqdm import tqdm | 10,421 |
def load_input(batch, device, tag="dev"):
input = {'input_ids': batch[0].to(device),
'attention_mask': batch[1].to(device),
'labels': batch[2].to(device),
'entity_pos': batch[3],
'hts': batch[4],
'sent_pos': batch[5],
'sent_labels': batch[6].to(device) if (not batch[6] is None) and (batch[7] is None) else None,
'teacher_attns': batch[7].to(device) if not batch[7] is None else None,
'graph': batch[8],
'tag': tag
}
return input
def train(args, model, train_features, dev_features):
def finetune(features, optimizer, num_epoch, num_steps):
best_score = -1
train_dataloader = DataLoader(features, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn,
drop_last=True)
train_iterator = range(int(num_epoch))
total_steps = int(len(train_dataloader) * num_epoch // args.gradient_accumulation_steps)
warmup_steps = int(total_steps * args.warmup_ratio)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps,
num_training_steps=total_steps)
scaler = GradScaler()
print("Total steps: {}".format(total_steps))
print("Warmup steps: {}".format(warmup_steps))
for epoch in tqdm(train_iterator, desc='Train epoch'):
for step, batch in enumerate(train_dataloader):
model.zero_grad()
optimizer.zero_grad()
model.train()
inputs = load_input(batch, args.device)
outputs = model(**inputs)
loss = [outputs["loss"]["rel_loss"]]
if inputs["sent_labels"] is not None:
loss.append(outputs["loss"]["evi_loss"] * args.evi_lambda)
if inputs["teacher_attns"] is not None:
loss.append(outputs["loss"]["attn_loss"] * args.attn_lambda)
loss = sum(loss) / args.gradient_accumulation_steps
scaler.scale(loss).backward()
if step % args.gradient_accumulation_steps == 0:
if args.max_grad_norm > 0:
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
scaler.step(optimizer)
scaler.update()
scheduler.step()
model.zero_grad()
num_steps += 1
if (step + 1) == len(train_dataloader) or (
args.evaluation_steps > 0 and num_steps % args.evaluation_steps == 0 and step % args.gradient_accumulation_steps == 0):
dev_scores, dev_output, official_results, results = evaluate(args, model, dev_features, tag="dev")
print(dev_output)
if dev_scores["dev_F1_ign"] > best_score:
best_score = dev_scores["dev_F1_ign"]
best_offi_results = official_results
best_results = results
best_output = dev_output
ckpt_file = os.path.join(args.save_path, "best.ckpt")
print(f"saving model checkpoint into {ckpt_file} ...")
torch.save(model.state_dict(), ckpt_file)
if epoch == train_iterator[-1]: # last epoch
ckpt_file = os.path.join(args.save_path, "last.ckpt")
print(f"saving model checkpoint into {ckpt_file} ...")
torch.save(model.state_dict(), ckpt_file)
pred_file = os.path.join(args.save_path, args.pred_file)
score_file = os.path.join(args.save_path, "scores.csv")
results_file = os.path.join(args.save_path, f"topk_{args.pred_file}")
dump_to_file(best_offi_results, pred_file, best_output, score_file, best_results, results_file)
return num_steps
new_layer = ["extractor", "bilinear", "graph"]
optimizer_grouped_parameters = [
{"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in new_layer)], },
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in new_layer)], "lr": args.lr_added},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.lr_transformer, eps=args.adam_epsilon)
num_steps = 0
|
def load_input(batch, device, tag="dev"):
input = {'input_ids': batch[0].to(device),
'attention_mask': batch[1].to(device),
'labels': batch[2].to(device),
'entity_pos': batch[3],
'hts': batch[4],
'sent_pos': batch[5],
'sent_labels': batch[6].to(device) if (not batch[6] is None) and (batch[7] is None) else None,
'teacher_attns': batch[7].to(device) if not batch[7] is None else None,
'graph': batch[8],
'tag': tag
}
return input
def train(args, model, train_features, dev_features):
def finetune(features, optimizer, num_epoch, num_steps):
best_score = -1
train_dataloader = DataLoader(features, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn,
drop_last=True)
train_iterator = range(int(num_epoch))
total_steps = int(len(train_dataloader) * num_epoch // args.gradient_accumulation_steps)
warmup_steps = int(total_steps * args.warmup_ratio)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps,
num_training_steps=total_steps)
scaler = GradScaler()
print("Total steps: {}".format(total_steps))
print("Warmup steps: {}".format(warmup_steps))
for epoch in tqdm(train_iterator, desc='Train epoch'):
for step, batch in enumerate(train_dataloader):
model.zero_grad()
optimizer.zero_grad()
model.train()
inputs = load_input(batch, args.device)
outputs = model(**inputs)
loss = [outputs["loss"]["rel_loss"]]
if inputs["sent_labels"] is not None:
loss.append(outputs["loss"]["evi_loss"] * args.evi_lambda)
if inputs["teacher_attns"] is not None:
loss.append(outputs["loss"]["attn_loss"] * args.attn_lambda)
loss = sum(loss) / args.gradient_accumulation_steps
scaler.scale(loss).backward()
if step % args.gradient_accumulation_steps == 0:
if args.max_grad_norm > 0:
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
scaler.step(optimizer)
scaler.update()
scheduler.step()
model.zero_grad()
num_steps += 1
if (step + 1) == len(train_dataloader) or (
args.evaluation_steps > 0 and num_steps % args.evaluation_steps == 0 and step % args.gradient_accumulation_steps == 0):
dev_scores, dev_output, official_results, results = evaluate(args, model, dev_features, tag="dev")
print(dev_output)
if dev_scores["dev_F1_ign"] > best_score:
best_score = dev_scores["dev_F1_ign"]
best_offi_results = official_results
best_results = results
best_output = dev_output
ckpt_file = os.path.join(args.save_path, "best.ckpt")
print(f"saving model checkpoint into {ckpt_file} ...")
torch.save(model.state_dict(), ckpt_file)
if epoch == train_iterator[-1]: # last epoch
ckpt_file = os.path.join(args.save_path, "last.ckpt")
print(f"saving model checkpoint into {ckpt_file} ...")
torch.save(model.state_dict(), ckpt_file)
pred_file = os.path.join(args.save_path, args.pred_file)
score_file = os.path.join(args.save_path, "scores.csv")
results_file = os.path.join(args.save_path, f"topk_{args.pred_file}")
dump_to_file(best_offi_results, pred_file, best_output, score_file, best_results, results_file)
return num_steps
new_layer = ["extractor", "bilinear", "graph"]
optimizer_grouped_parameters = [
{"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in new_layer)], },
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in new_layer)], "lr": args.lr_added},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.lr_transformer, eps=args.adam_epsilon)
num_steps = 0 | set_seed(args) | 2 | 2023-10-20 05:53:25+00:00 | 12k |
xingchenshanyao/YOLOP-E | lib/dataset/bdd.py | [
{
"identifier": "AutoDriveDataset",
"path": "lib/dataset/AutoDriveDataset.py",
"snippet": "class AutoDriveDataset(Dataset):\n \"\"\"\n A general Dataset for some common function\n \"\"\"\n def __init__(self, cfg, is_train, inputsize=640, transform=None):\n \"\"\"\n initial all ... | import numpy as np
import json
from .AutoDriveDataset import AutoDriveDataset
from .convert import convert, id_dict, id_dict_single, id_dict_SDExpressway, id_dict_SDExpressway_single
from tqdm import tqdm | 9,689 |
single_cls = False # just detect vehicle
class BddDataset(AutoDriveDataset):
def __init__(self, cfg, is_train, inputsize, transform=None):
super().__init__(cfg, is_train, inputsize, transform)
self.db = self._get_db() # 加载数据集 # self.db = [{'image': '/home/xingchen/Study...3225df.jpg', 'label': array([[0. , ...7547441]]), 'mask': '/home/xingchen/Study...3225df.png', 'lane': '/home/xingchen/Study...3225df.png'}, ...]
self.cfg = cfg
def _get_db(self):
"""
get database from the annotation file
Inputs:
Returns:
gt_db: (list)database [a,b,c,...]
a: (dictionary){'image':, 'information':, ......}
image: image path
mask: path of the segmetation label
label: [cls_id, center_x//256, center_y//256, w//256, h//256] 256=IMAGE_SIZE
"""
print('building database...')
gt_db = []
height, width = self.shapes
for mask in tqdm(list(self.mask_list)): # 加载数据集和标签
mask_path = str(mask)
label_path = mask_path.replace(str(self.mask_root), str(self.label_root)).replace(".png", ".json")
image_path = mask_path.replace(str(self.mask_root), str(self.img_root)).replace(".png", ".jpg")
lane_path = mask_path.replace(str(self.mask_root), str(self.lane_root))
with open(label_path, 'r') as f:
label = json.load(f)
# # BDD100k
# data = label['frames'][0]['objects']
# data = self.filter_data(data)
# gt = np.zeros((len(data), 5))
# for idx, obj in enumerate(data):
# category = obj['category']
# if category == "traffic light":
# color = obj['attributes']['trafficLightColor']
# category = "tl_" + color
# if category in id_dict.keys():
# x1 = float(obj['box2d']['x1'])
# y1 = float(obj['box2d']['y1'])
# x2 = float(obj['box2d']['x2'])
# y2 = float(obj['box2d']['y2'])
# cls_id = id_dict[category]
# gt[idx][0] = cls_id
# box = convert((width, height), (x1, x2, y1, y2))
# gt[idx][1:] = list(box)
# SDExpressway
data = label['shapes']
data = self.filter_data(data)
gt = np.zeros((len(data), 5))
for idx, obj in enumerate(data):
category = obj['label'] # 类别
if category in id_dict_SDExpressway.keys():
x1 = float(obj['points'][0][0])
y1 = float(obj['points'][0][1])
x2 = float(obj['points'][1][0])
y2 = float(obj['points'][1][1])
if x1>x2:
x1, x2 = x2, x1
if y1>y2:
y1, y2 = y2, y1
cls_id = id_dict_SDExpressway[category]
# if single_cls: # 20230816
# cls_id=0
gt[idx][0] = cls_id
box = convert((width, height), (x1, x2, y1, y2))
gt[idx][1:] = list(box)
rec = [{
'image': image_path,
'label': gt,
'mask': mask_path,
'lane': lane_path
}]
gt_db += rec
print('database build finish')
return gt_db
# # BDD100k数据集
# def filter_data(self, data):
# remain = []
# for obj in data:
# if 'box2d' in obj.keys(): # obj.has_key('box2d'):
# if single_cls: # 只预测车辆
# if obj['category'] in id_dict_single.keys():
# remain.append(obj)
# else:
# remain.append(obj)
# return remain
# SDExpressway数据集
def filter_data(self, data):
remain = []
for obj in data:
if 'points' in obj.keys(): # obj.has_key('box2d'):
if single_cls:
|
single_cls = False # just detect vehicle
class BddDataset(AutoDriveDataset):
def __init__(self, cfg, is_train, inputsize, transform=None):
super().__init__(cfg, is_train, inputsize, transform)
self.db = self._get_db() # 加载数据集 # self.db = [{'image': '/home/xingchen/Study...3225df.jpg', 'label': array([[0. , ...7547441]]), 'mask': '/home/xingchen/Study...3225df.png', 'lane': '/home/xingchen/Study...3225df.png'}, ...]
self.cfg = cfg
def _get_db(self):
"""
get database from the annotation file
Inputs:
Returns:
gt_db: (list)database [a,b,c,...]
a: (dictionary){'image':, 'information':, ......}
image: image path
mask: path of the segmetation label
label: [cls_id, center_x//256, center_y//256, w//256, h//256] 256=IMAGE_SIZE
"""
print('building database...')
gt_db = []
height, width = self.shapes
for mask in tqdm(list(self.mask_list)): # 加载数据集和标签
mask_path = str(mask)
label_path = mask_path.replace(str(self.mask_root), str(self.label_root)).replace(".png", ".json")
image_path = mask_path.replace(str(self.mask_root), str(self.img_root)).replace(".png", ".jpg")
lane_path = mask_path.replace(str(self.mask_root), str(self.lane_root))
with open(label_path, 'r') as f:
label = json.load(f)
# # BDD100k
# data = label['frames'][0]['objects']
# data = self.filter_data(data)
# gt = np.zeros((len(data), 5))
# for idx, obj in enumerate(data):
# category = obj['category']
# if category == "traffic light":
# color = obj['attributes']['trafficLightColor']
# category = "tl_" + color
# if category in id_dict.keys():
# x1 = float(obj['box2d']['x1'])
# y1 = float(obj['box2d']['y1'])
# x2 = float(obj['box2d']['x2'])
# y2 = float(obj['box2d']['y2'])
# cls_id = id_dict[category]
# gt[idx][0] = cls_id
# box = convert((width, height), (x1, x2, y1, y2))
# gt[idx][1:] = list(box)
# SDExpressway
data = label['shapes']
data = self.filter_data(data)
gt = np.zeros((len(data), 5))
for idx, obj in enumerate(data):
category = obj['label'] # 类别
if category in id_dict_SDExpressway.keys():
x1 = float(obj['points'][0][0])
y1 = float(obj['points'][0][1])
x2 = float(obj['points'][1][0])
y2 = float(obj['points'][1][1])
if x1>x2:
x1, x2 = x2, x1
if y1>y2:
y1, y2 = y2, y1
cls_id = id_dict_SDExpressway[category]
# if single_cls: # 20230816
# cls_id=0
gt[idx][0] = cls_id
box = convert((width, height), (x1, x2, y1, y2))
gt[idx][1:] = list(box)
rec = [{
'image': image_path,
'label': gt,
'mask': mask_path,
'lane': lane_path
}]
gt_db += rec
print('database build finish')
return gt_db
# # BDD100k数据集
# def filter_data(self, data):
# remain = []
# for obj in data:
# if 'box2d' in obj.keys(): # obj.has_key('box2d'):
# if single_cls: # 只预测车辆
# if obj['category'] in id_dict_single.keys():
# remain.append(obj)
# else:
# remain.append(obj)
# return remain
# SDExpressway数据集
def filter_data(self, data):
remain = []
for obj in data:
if 'points' in obj.keys(): # obj.has_key('box2d'):
if single_cls: | if obj['label'] in id_dict_SDExpressway_single.keys(): | 1 | 2023-10-24 02:08:25+00:00 | 12k |
Subsets and Splits
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that have consistent code formatting levels across multiple scales (2k, 4k, 8k, 12k) and reveals the structured formatting patterns within these repositories.
SQL Console for tianyang/repobench_python_v1.1
Compares cross-file and in-file code structure patterns across different complexity levels, revealing how file organization strategies vary with code size and potentially informing better code architecture decisions.
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that have complete performance data across all seven code complexity levels, revealing consistent benchmarking patterns across different code sizes.
SQL Console for tianyang/repobench_python_v1.1
Identifies repositories that contain all 7 distinct quality levels (2k through 32k), revealing complete datasets that might be useful for comprehensive analysis.